author    Russ Cox <rsc@golang.org>    2015-02-13 14:40:36 -0500
committer Russ Cox <rsc@golang.org>    2015-02-17 23:28:51 +0000
commit    8c195bdf120ea86ddda42df89df7bfba80afdf10 (patch)
tree      e542e4476cdf428912a89cfed28291e0beb4acfe
parent    c11882bc3e8f6ab4eea79d7bbd0647b31dad7482 (diff)
[dev.cc] cmd/internal/gc, cmd/new6g etc: convert from cmd/gc, cmd/6g etc
First draft of converted Go compiler, using rsc.io/c2go rev 83d795a.

Change-Id: I29f4c7010de07d2ff1947bbca9865879d83c32c3
Reviewed-on: https://go-review.googlesource.com/4851
Reviewed-by: Rob Pike <r@golang.org>
-rw-r--r--  src/cmd/internal/gc/align.go  735
-rw-r--r--  src/cmd/internal/gc/array.go  9
-rw-r--r--  src/cmd/internal/gc/bits.go  167
-rw-r--r--  src/cmd/internal/gc/builtin.go  6
-rw-r--r--  src/cmd/internal/gc/bv.go  208
-rw-r--r--  src/cmd/internal/gc/closure.go  696
-rw-r--r--  src/cmd/internal/gc/const.go  1764
-rw-r--r--  src/cmd/internal/gc/cplx.go  503
-rw-r--r--  src/cmd/internal/gc/dcl.go  1565
-rw-r--r--  src/cmd/internal/gc/esc.go  1437
-rw-r--r--  src/cmd/internal/gc/export.go  596
-rw-r--r--  src/cmd/internal/gc/fmt.go  1953
-rw-r--r--  src/cmd/internal/gc/gen.go  1017
-rw-r--r--  src/cmd/internal/gc/go.go  1179
-rw-r--r--  src/cmd/internal/gc/go.y  2252
-rw-r--r--  src/cmd/internal/gc/gsubr.go  617
-rw-r--r--  src/cmd/internal/gc/init.go  232
-rw-r--r--  src/cmd/internal/gc/inl.go  1040
-rw-r--r--  src/cmd/internal/gc/lex.go  3204
-rw-r--r--  src/cmd/internal/gc/md5.go  329
-rw-r--r--  src/cmd/internal/gc/mparith1.go  698
-rw-r--r--  src/cmd/internal/gc/mparith2.go  728
-rw-r--r--  src/cmd/internal/gc/mparith3.go  377
-rw-r--r--  src/cmd/internal/gc/obj.go  481
-rw-r--r--  src/cmd/internal/gc/opnames.go  162
-rw-r--r--  src/cmd/internal/gc/order.go  1188
-rw-r--r--  src/cmd/internal/gc/pgen.go  597
-rw-r--r--  src/cmd/internal/gc/plive.go  2018
-rw-r--r--  src/cmd/internal/gc/popt.go  1283
-rw-r--r--  src/cmd/internal/gc/racewalk.go  681
-rw-r--r--  src/cmd/internal/gc/range.go  426
-rw-r--r--  src/cmd/internal/gc/reflect.go  1746
-rw-r--r--  src/cmd/internal/gc/reg.go  1401
-rw-r--r--  src/cmd/internal/gc/select.go  389
-rw-r--r--  src/cmd/internal/gc/sinit.go  1602
-rw-r--r--  src/cmd/internal/gc/subr.go  3932
-rw-r--r--  src/cmd/internal/gc/swt.go  1028
-rw-r--r--  src/cmd/internal/gc/typecheck.go  4076
-rw-r--r--  src/cmd/internal/gc/unsafe.go  178
-rw-r--r--  src/cmd/internal/gc/util.go  70
-rw-r--r--  src/cmd/internal/gc/walk.go  4531
-rw-r--r--  src/cmd/internal/gc/y.go  3524
-rw-r--r--  src/cmd/internal/gc/y.output  10411
-rw-r--r--  src/cmd/internal/obj/ar.go  45
-rw-r--r--  src/cmd/internal/obj/arm/5.out.go  1
-rw-r--r--  src/cmd/internal/obj/arm/anames5.go  2
-rw-r--r--  src/cmd/internal/obj/arm/asm5.go  30
-rw-r--r--  src/cmd/internal/obj/arm/list5.go  3
-rw-r--r--  src/cmd/internal/obj/arm/obj5.go  28
-rw-r--r--  src/cmd/internal/obj/data.go  34
-rw-r--r--  src/cmd/internal/obj/flag.go  120
-rw-r--r--  src/cmd/internal/obj/fmt.go  26
-rw-r--r--  src/cmd/internal/obj/go.go  87
-rw-r--r--  src/cmd/internal/obj/i386/8.out.go  1
-rw-r--r--  src/cmd/internal/obj/i386/anames8.go  4
-rw-r--r--  src/cmd/internal/obj/i386/asm8.go  15
-rw-r--r--  src/cmd/internal/obj/i386/list8.go  8
-rw-r--r--  src/cmd/internal/obj/i386/obj8.go  5
-rw-r--r--  src/cmd/internal/obj/i386/util.go  4
-rw-r--r--  src/cmd/internal/obj/ld.go  4
-rw-r--r--  src/cmd/internal/obj/libc.go  20
-rw-r--r--  src/cmd/internal/obj/link.go  18
-rw-r--r--  src/cmd/internal/obj/mgc0.go  37
-rw-r--r--  src/cmd/internal/obj/pcln.go  44
-rw-r--r--  src/cmd/internal/obj/ppc64/9.out.go  2
-rw-r--r--  src/cmd/internal/obj/ppc64/anames9.go  4
-rw-r--r--  src/cmd/internal/obj/ppc64/asm9.go  2
-rw-r--r--  src/cmd/internal/obj/ppc64/list9.go  4
-rw-r--r--  src/cmd/internal/obj/ppc64/obj9.go  8
-rw-r--r--  src/cmd/internal/obj/ppc64/util.go  4
-rw-r--r--  src/cmd/internal/obj/stack.go  3
-rw-r--r--  src/cmd/internal/obj/sym.go  37
-rw-r--r--  src/cmd/internal/obj/typekind.go  45
-rw-r--r--  src/cmd/internal/obj/util.go  158
-rw-r--r--  src/cmd/internal/obj/x86/6.out.go  1
-rw-r--r--  src/cmd/internal/obj/x86/anames6.go  4
-rw-r--r--  src/cmd/internal/obj/x86/asm6.go  16
-rw-r--r--  src/cmd/internal/obj/x86/list6.go  10
-rw-r--r--  src/cmd/internal/obj/x86/obj6.go  25
-rw-r--r--  src/cmd/internal/obj/x86/util.go  4
-rw-r--r--  src/cmd/new5g/cgen.go  2004
-rw-r--r--  src/cmd/new5g/cgen64.go  836
-rw-r--r--  src/cmd/new5g/galign.go  84
-rw-r--r--  src/cmd/new5g/gg.go  32
-rw-r--r--  src/cmd/new5g/ggen.go  822
-rw-r--r--  src/cmd/new5g/gsubr.go  1599
-rw-r--r--  src/cmd/new5g/peep.go  1868
-rw-r--r--  src/cmd/new5g/prog.go  163
-rw-r--r--  src/cmd/new5g/reg.go  136
-rw-r--r--  src/cmd/new5g/util.go  12
-rw-r--r--  src/cmd/new6g/cgen.go  1889
-rw-r--r--  src/cmd/new6g/galign.go  109
-rw-r--r--  src/cmd/new6g/gg.go  24
-rw-r--r--  src/cmd/new6g/ggen.go  1169
-rw-r--r--  src/cmd/new6g/gsubr.go  1755
-rw-r--r--  src/cmd/new6g/peep.go  1077
-rw-r--r--  src/cmd/new6g/prog.go  272
-rw-r--r--  src/cmd/new6g/reg.go  144
-rw-r--r--  src/cmd/new6g/util.go  12
-rw-r--r--  src/cmd/new8g/cgen.go  1731
-rw-r--r--  src/cmd/new8g/cgen64.go  609
-rw-r--r--  src/cmd/new8g/galign.go  84
-rw-r--r--  src/cmd/new8g/gg.go  34
-rw-r--r--  src/cmd/new8g/ggen.go  1297
-rw-r--r--  src/cmd/new8g/gsubr.go  1933
-rw-r--r--  src/cmd/new8g/peep.go  847
-rw-r--r--  src/cmd/new8g/prog.go  291
-rw-r--r--  src/cmd/new8g/reg.go  112
-rw-r--r--  src/cmd/new8g/util.go  12
-rw-r--r--  src/cmd/new9g/cgen.go  1889
-rw-r--r--  src/cmd/new9g/galign.go  92
-rw-r--r--  src/cmd/new9g/gg.go  28
-rw-r--r--  src/cmd/new9g/ggen.go  1060
-rw-r--r--  src/cmd/new9g/gsubr.go  1169
-rw-r--r--  src/cmd/new9g/opt.go  42
-rw-r--r--  src/cmd/new9g/peep.go  1071
-rw-r--r--  src/cmd/new9g/prog.go  318
-rw-r--r--  src/cmd/new9g/reg.go  164
-rw-r--r--  src/cmd/new9g/util.go  12
119 files changed, 88448 insertions(+), 253 deletions(-)
diff --git a/src/cmd/internal/gc/align.go b/src/cmd/internal/gc/align.go
new file mode 100644
index 0000000000..062d35a1da
--- /dev/null
+++ b/src/cmd/internal/gc/align.go
@@ -0,0 +1,735 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+/*
+ * machine size and rounding;
+ * alignment is dictated by the size of a pointer,
+ * set in betypeinit (see ../6g/galign.c).
+ */
+var defercalc int
+
+func Rnd(o int64, r int64) int64 {
+ if r < 1 || r > 8 || r&(r-1) != 0 {
+ Fatal("rnd %d", r)
+ }
+ return (o + r - 1) &^ (r - 1)
+}
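
Rnd rounds o up to the next multiple of r, which must be a power of two no larger than 8; adding r-1 and then clearing the low bits with &^ is the usual alignment trick. A quick standalone check of the identity (hypothetical rnd helper mirroring the logic above):

    package main

    import "fmt"

    func rnd(o, r int64) int64 { return (o + r - 1) &^ (r - 1) }

    func main() {
        fmt.Println(rnd(5, 4)) // 8: rounded up to the next multiple of 4
        fmt.Println(rnd(8, 4)) // 8: already-aligned values are unchanged
        fmt.Println(rnd(0, 8)) // 0
    }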
+
+func offmod(t *Type) {
+ var f *Type
+ var o int32
+
+ o = 0
+ for f = t.Type; f != nil; f = f.Down {
+ if f.Etype != TFIELD {
+ Fatal("offmod: not TFIELD: %v", Tconv(f, obj.FmtLong))
+ }
+ f.Width = int64(o)
+ o += int32(Widthptr)
+ if int64(o) >= Thearch.MAXWIDTH {
+ Yyerror("interface too large")
+ o = int32(Widthptr)
+ }
+ }
+}
+
+func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
+ var f *Type
+ var w int64
+ var maxalign int32
+ var starto int64
+ var lastzero int64
+
+ starto = o
+ maxalign = int32(flag)
+ if maxalign < 1 {
+ maxalign = 1
+ }
+ lastzero = 0
+ for f = t.Type; f != nil; f = f.Down {
+ if f.Etype != TFIELD {
+ Fatal("widstruct: not TFIELD: %v", Tconv(f, obj.FmtLong))
+ }
+ if f.Type == nil {
+ // broken field, just skip it so that other valid fields
+ // get a width.
+ continue
+ }
+
+ dowidth(f.Type)
+ if int32(f.Type.Align) > maxalign {
+ maxalign = int32(f.Type.Align)
+ }
+ if f.Type.Width < 0 {
+ Fatal("invalid width %d", f.Type.Width)
+ }
+ w = f.Type.Width
+ if f.Type.Align > 0 {
+ o = Rnd(o, int64(f.Type.Align))
+ }
+ f.Width = o // really offset for TFIELD
+ if f.Nname != nil {
+ // this same stackparam logic is in addrescapes
+ // in typecheck.c. usually addrescapes runs after
+ // widstruct, in which case we could drop this,
+ // but function closure functions are the exception.
+ if f.Nname.Stackparam != nil {
+ f.Nname.Stackparam.Xoffset = o
+ f.Nname.Xoffset = 0
+ } else {
+ f.Nname.Xoffset = o
+ }
+ }
+
+ if w == 0 {
+ lastzero = o
+ }
+ o += w
+ if o >= Thearch.MAXWIDTH {
+ Yyerror("type %v too large", Tconv(errtype, obj.FmtLong))
+ o = 8 // small but nonzero
+ }
+ }
+
+ // For nonzero-sized structs which end in a zero-sized thing, we add
+ // an extra byte of padding to the type. This padding ensures that
+ // taking the address of the zero-sized thing can't manufacture a
+ // pointer to the next object in the heap. See issue 9401.
+ if flag == 1 && o > starto && o == lastzero {
+ o++
+ }
+
+ // final width is rounded
+ if flag != 0 {
+ o = Rnd(o, int64(maxalign))
+ }
+ t.Align = uint8(maxalign)
+
+ // type width only includes back to first field's offset
+ t.Width = o - starto
+
+ return o
+}
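
The issue-9401 padding above is observable from ordinary Go code; a minimal check, assuming the usual gc layout rules:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // tail ends in a zero-sized field, so taking the address of t.z could
    // otherwise yield a pointer one past the struct, into the next heap object.
    type tail struct {
        n int32
        z [0]byte
    }

    func main() {
        var t tail
        fmt.Println(unsafe.Sizeof(t)) // typically 8: 4 for n, 1 padding byte, rounded to align 4
    }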
+
+func dowidth(t *Type) {
+ var et int32
+ var w int64
+ var lno int
+ var t1 *Type
+
+ if Widthptr == 0 {
+ Fatal("dowidth without betypeinit")
+ }
+
+ if t == nil {
+ return
+ }
+
+ if t.Width > 0 {
+ return
+ }
+
+ if t.Width == -2 {
+ lno = int(lineno)
+ lineno = int32(t.Lineno)
+ if !(t.Broke != 0) {
+ t.Broke = 1
+ Yyerror("invalid recursive type %v", Tconv(t, 0))
+ }
+
+ t.Width = 0
+ lineno = int32(lno)
+ return
+ }
+
+ // break infinite recursion if the broken recursive type
+ // is referenced again
+ if t.Broke != 0 && t.Width == 0 {
+ return
+ }
+
+ // defer checkwidth calls until after we're done
+ defercalc++
+
+ lno = int(lineno)
+ lineno = int32(t.Lineno)
+ t.Width = -2
+ t.Align = 0
+
+ et = int32(t.Etype)
+ switch et {
+ case TFUNC,
+ TCHAN,
+ TMAP,
+ TSTRING:
+ break
+
+ /* simtype == 0 during bootstrap */
+ default:
+ if Simtype[t.Etype] != 0 {
+ et = int32(Simtype[t.Etype])
+ }
+ }
+
+ w = 0
+ switch et {
+ default:
+ Fatal("dowidth: unknown type: %v", Tconv(t, 0))
+
+ /* compiler-specific stuff */
+ case TINT8,
+ TUINT8,
+ TBOOL:
+ // bool is int8
+ w = 1
+
+ case TINT16,
+ TUINT16:
+ w = 2
+
+ case TINT32,
+ TUINT32,
+ TFLOAT32:
+ w = 4
+
+ case TINT64,
+ TUINT64,
+ TFLOAT64,
+ TCOMPLEX64:
+ w = 8
+ t.Align = uint8(Widthreg)
+
+ case TCOMPLEX128:
+ w = 16
+ t.Align = uint8(Widthreg)
+
+ case TPTR32:
+ w = 4
+ checkwidth(t.Type)
+
+ case TPTR64:
+ w = 8
+ checkwidth(t.Type)
+
+ case TUNSAFEPTR:
+ w = int64(Widthptr)
+
+ case TINTER: // implemented as 2 pointers
+ w = 2 * int64(Widthptr)
+
+ t.Align = uint8(Widthptr)
+ offmod(t)
+
+ case TCHAN: // implemented as pointer
+ w = int64(Widthptr)
+
+ checkwidth(t.Type)
+
+ // make fake type to check later to
+ // trigger channel argument check.
+ t1 = typ(TCHANARGS)
+
+ t1.Type = t
+ checkwidth(t1)
+
+ case TCHANARGS:
+ t1 = t.Type
+ dowidth(t.Type) // just in case
+ if t1.Type.Width >= 1<<16 {
+ Yyerror("channel element type too large (>64kB)")
+ }
+ t.Width = 1
+
+ case TMAP: // implemented as pointer
+ w = int64(Widthptr)
+
+ checkwidth(t.Type)
+ checkwidth(t.Down)
+
+ case TFORW: // should have been filled in
+ if !(t.Broke != 0) {
+ Yyerror("invalid recursive type %v", Tconv(t, 0))
+ }
+ w = 1 // anything will do
+
+ // dummy type; should be replaced before use.
+ case TANY:
+ if !(Debug['A'] != 0) {
+ Fatal("dowidth any")
+ }
+ w = 1 // anything will do
+
+ case TSTRING:
+ if sizeof_String == 0 {
+ Fatal("early dowidth string")
+ }
+ w = int64(sizeof_String)
+ t.Align = uint8(Widthptr)
+
+ case TARRAY:
+ if t.Type == nil {
+ break
+ }
+ if t.Bound >= 0 {
+ var cap uint64
+
+ dowidth(t.Type)
+ if t.Type.Width != 0 {
+ cap = (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Type.Width)
+ if uint64(t.Bound) > cap {
+ Yyerror("type %v larger than address space", Tconv(t, obj.FmtLong))
+ }
+ }
+
+ w = t.Bound * t.Type.Width
+ t.Align = t.Type.Align
+ } else if t.Bound == -1 {
+ w = int64(sizeof_Array)
+ checkwidth(t.Type)
+ t.Align = uint8(Widthptr)
+ } else if t.Bound == -100 {
+ if !(t.Broke != 0) {
+ Yyerror("use of [...] array outside of array literal")
+ t.Broke = 1
+ }
+ } else {
+ Fatal("dowidth %v", Tconv(t, 0)) // probably [...]T
+ }
+
+ case TSTRUCT:
+ if t.Funarg != 0 {
+ Fatal("dowidth fn struct %v", Tconv(t, 0))
+ }
+ w = widstruct(t, t, 0, 1)
+
+ // make fake type to check later to
+ // trigger function argument computation.
+ case TFUNC:
+ t1 = typ(TFUNCARGS)
+
+ t1.Type = t
+ checkwidth(t1)
+
+ // width of func type is pointer
+ w = int64(Widthptr)
+
+	// function is 3 concatenated structures;
+	// compute their widths as a side effect.
+ case TFUNCARGS:
+ t1 = t.Type
+
+ w = widstruct(t.Type, *getthis(t1), 0, 0)
+ w = widstruct(t.Type, *getinarg(t1), w, Widthreg)
+ w = widstruct(t.Type, *Getoutarg(t1), w, Widthreg)
+ t1.Argwid = w
+ if w%int64(Widthreg) != 0 {
+ Warn("bad type %v %d\n", Tconv(t1, 0), w)
+ }
+ t.Align = 1
+ }
+
+ if Widthptr == 4 && w != int64(int32(w)) {
+ Yyerror("type %v too large", Tconv(t, 0))
+ }
+
+ t.Width = w
+ if t.Align == 0 {
+ if w > 8 || w&(w-1) != 0 {
+ Fatal("invalid alignment for %v", Tconv(t, 0))
+ }
+ t.Align = uint8(w)
+ }
+
+ lineno = int32(lno)
+
+ if defercalc == 1 {
+ resumecheckwidth()
+ } else {
+ defercalc--
+ }
+}
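
The Width == -2 state that dowidth checks on entry is a classic "in progress" marker for cycle detection. A minimal sketch of the same idea, with hypothetical names and none of the real error handling:

    package sketch

    // node stands in for a type: width 0 is unknown, -2 is in progress,
    // and a positive value is the computed size.
    type node struct {
        width int64
        elem  *node
    }

    // widthOf computes a node's width, using the -2 sentinel so that a
    // recursive (invalid) definition is caught instead of recursing forever.
    func widthOf(n *node) int64 {
        if n.width > 0 {
            return n.width
        }
        if n.width == -2 {
            panic("invalid recursive type")
        }
        n.width = -2
        w := int64(8) // stand-in for the real per-kind size computation
        if n.elem != nil {
            w += widthOf(n.elem)
        }
        n.width = w
        return w
    }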
+
+/*
+ * when a type's width should be known, we call checkwidth
+ * to compute it. during a declaration like
+ *
+ * type T *struct { next T }
+ *
+ * it is necessary to defer the calculation of the struct width
+ * until after T has been initialized to be a pointer to that struct.
+ * similarly, during import processing structs may be used
+ * before their definition. in those situations, calling
+ * defercheckwidth() stops width calculations until
+ * resumecheckwidth() is called, at which point all the
+ * checkwidths that were deferred are executed.
+ * dowidth should only be called when the type's size
+ * is needed immediately. checkwidth makes sure the
+ * size is evaluated eventually.
+ */
+type TypeList struct {
+ t *Type
+ next *TypeList
+}
+
+var tlfree *TypeList
+
+var tlq *TypeList
+
+func checkwidth(t *Type) {
+ var l *TypeList
+
+ if t == nil {
+ return
+ }
+
+ // function arg structs should not be checked
+ // outside of the enclosing function.
+ if t.Funarg != 0 {
+ Fatal("checkwidth %v", Tconv(t, 0))
+ }
+
+ if !(defercalc != 0) {
+ dowidth(t)
+ return
+ }
+
+ if t.Deferwidth != 0 {
+ return
+ }
+ t.Deferwidth = 1
+
+ l = tlfree
+ if l != nil {
+ tlfree = l.next
+ } else {
+ l = new(TypeList)
+ }
+
+ l.t = t
+ l.next = tlq
+ tlq = l
+}
+
+func defercheckwidth() {
+ // we get out of sync on syntax errors, so don't be pedantic.
+ if defercalc != 0 && nerrors == 0 {
+ Fatal("defercheckwidth")
+ }
+ defercalc = 1
+}
+
+func resumecheckwidth() {
+ var l *TypeList
+
+ if !(defercalc != 0) {
+ Fatal("resumecheckwidth")
+ }
+ for l = tlq; l != nil; l = tlq {
+ l.t.Deferwidth = 0
+ tlq = l.next
+ dowidth(l.t)
+ l.next = tlfree
+ tlfree = l
+ }
+
+ defercalc = 0
+}
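
checkwidth, defercheckwidth, and resumecheckwidth amount to a small work queue that buffers requests while a flag is set (the real code also nests via the defercalc counter in dowidth, and recycles list cells through tlfree). A stripped-down sketch of the pattern, names hypothetical:

    package sketch

    var (
        deferring bool
        queue     []func()
    )

    // check runs compute immediately, unless computations are currently
    // deferred, in which case it is queued for resumeChecks to drain.
    func check(compute func()) {
        if deferring {
            queue = append(queue, compute)
            return
        }
        compute()
    }

    func deferChecks() { deferring = true }

    func resumeChecks() {
        deferring = false
        for _, f := range queue {
            f()
        }
        queue = nil
    }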
+
+func typeinit() {
+ var i int
+ var etype int
+ var sameas int
+ var t *Type
+ var s *Sym
+ var s1 *Sym
+
+ if Widthptr == 0 {
+ Fatal("typeinit before betypeinit")
+ }
+
+ for i = 0; i < NTYPE; i++ {
+ Simtype[i] = uint8(i)
+ }
+
+ Types[TPTR32] = typ(TPTR32)
+ dowidth(Types[TPTR32])
+
+ Types[TPTR64] = typ(TPTR64)
+ dowidth(Types[TPTR64])
+
+ t = typ(TUNSAFEPTR)
+ Types[TUNSAFEPTR] = t
+ t.Sym = Pkglookup("Pointer", unsafepkg)
+ t.Sym.Def = typenod(t)
+
+ dowidth(Types[TUNSAFEPTR])
+
+ Tptr = TPTR32
+ if Widthptr == 8 {
+ Tptr = TPTR64
+ }
+
+ for i = TINT8; i <= TUINT64; i++ {
+ Isint[i] = 1
+ }
+ Isint[TINT] = 1
+ Isint[TUINT] = 1
+ Isint[TUINTPTR] = 1
+
+ Isfloat[TFLOAT32] = 1
+ Isfloat[TFLOAT64] = 1
+
+ Iscomplex[TCOMPLEX64] = 1
+ Iscomplex[TCOMPLEX128] = 1
+
+ Isptr[TPTR32] = 1
+ Isptr[TPTR64] = 1
+
+ isforw[TFORW] = 1
+
+ Issigned[TINT] = 1
+ Issigned[TINT8] = 1
+ Issigned[TINT16] = 1
+ Issigned[TINT32] = 1
+ Issigned[TINT64] = 1
+
+ /*
+ * initialize okfor
+ */
+ for i = 0; i < NTYPE; i++ {
+ if Isint[i] != 0 || i == TIDEAL {
+ okforeq[i] = 1
+ okforcmp[i] = 1
+ okforarith[i] = 1
+ okforadd[i] = 1
+ okforand[i] = 1
+ okforconst[i] = 1
+ issimple[i] = 1
+ Minintval[i] = new(Mpint)
+ Maxintval[i] = new(Mpint)
+ }
+
+ if Isfloat[i] != 0 {
+ okforeq[i] = 1
+ okforcmp[i] = 1
+ okforadd[i] = 1
+ okforarith[i] = 1
+ okforconst[i] = 1
+ issimple[i] = 1
+ minfltval[i] = new(Mpflt)
+ maxfltval[i] = new(Mpflt)
+ }
+
+ if Iscomplex[i] != 0 {
+ okforeq[i] = 1
+ okforadd[i] = 1
+ okforarith[i] = 1
+ okforconst[i] = 1
+ issimple[i] = 1
+ }
+ }
+
+ issimple[TBOOL] = 1
+
+ okforadd[TSTRING] = 1
+
+ okforbool[TBOOL] = 1
+
+ okforcap[TARRAY] = 1
+ okforcap[TCHAN] = 1
+
+ okforconst[TBOOL] = 1
+ okforconst[TSTRING] = 1
+
+ okforlen[TARRAY] = 1
+ okforlen[TCHAN] = 1
+ okforlen[TMAP] = 1
+ okforlen[TSTRING] = 1
+
+ okforeq[TPTR32] = 1
+ okforeq[TPTR64] = 1
+ okforeq[TUNSAFEPTR] = 1
+ okforeq[TINTER] = 1
+ okforeq[TCHAN] = 1
+ okforeq[TSTRING] = 1
+ okforeq[TBOOL] = 1
+ okforeq[TMAP] = 1 // nil only; refined in typecheck
+ okforeq[TFUNC] = 1 // nil only; refined in typecheck
+ okforeq[TARRAY] = 1 // nil slice only; refined in typecheck
+ okforeq[TSTRUCT] = 1 // it's complicated; refined in typecheck
+
+ okforcmp[TSTRING] = 1
+
+ for i = 0; i < len(okfor); i++ {
+ okfor[i] = okfornone[:]
+ }
+
+ // binary
+ okfor[OADD] = okforadd[:]
+
+ okfor[OAND] = okforand[:]
+ okfor[OANDAND] = okforbool[:]
+ okfor[OANDNOT] = okforand[:]
+ okfor[ODIV] = okforarith[:]
+ okfor[OEQ] = okforeq[:]
+ okfor[OGE] = okforcmp[:]
+ okfor[OGT] = okforcmp[:]
+ okfor[OLE] = okforcmp[:]
+ okfor[OLT] = okforcmp[:]
+ okfor[OMOD] = okforand[:]
+ okfor[OMUL] = okforarith[:]
+ okfor[ONE] = okforeq[:]
+ okfor[OOR] = okforand[:]
+ okfor[OOROR] = okforbool[:]
+ okfor[OSUB] = okforarith[:]
+ okfor[OXOR] = okforand[:]
+ okfor[OLSH] = okforand[:]
+ okfor[ORSH] = okforand[:]
+
+ // unary
+ okfor[OCOM] = okforand[:]
+
+ okfor[OMINUS] = okforarith[:]
+ okfor[ONOT] = okforbool[:]
+ okfor[OPLUS] = okforarith[:]
+
+ // special
+ okfor[OCAP] = okforcap[:]
+
+ okfor[OLEN] = okforlen[:]
+
+ // comparison
+ iscmp[OLT] = 1
+
+ iscmp[OGT] = 1
+ iscmp[OGE] = 1
+ iscmp[OLE] = 1
+ iscmp[OEQ] = 1
+ iscmp[ONE] = 1
+
+ mpatofix(Maxintval[TINT8], "0x7f")
+ mpatofix(Minintval[TINT8], "-0x80")
+ mpatofix(Maxintval[TINT16], "0x7fff")
+ mpatofix(Minintval[TINT16], "-0x8000")
+ mpatofix(Maxintval[TINT32], "0x7fffffff")
+ mpatofix(Minintval[TINT32], "-0x80000000")
+ mpatofix(Maxintval[TINT64], "0x7fffffffffffffff")
+ mpatofix(Minintval[TINT64], "-0x8000000000000000")
+
+ mpatofix(Maxintval[TUINT8], "0xff")
+ mpatofix(Maxintval[TUINT16], "0xffff")
+ mpatofix(Maxintval[TUINT32], "0xffffffff")
+ mpatofix(Maxintval[TUINT64], "0xffffffffffffffff")
+
+ /* f is valid float if min < f < max. (min and max are not themselves valid.) */
+ mpatoflt(maxfltval[TFLOAT32], "33554431p103") /* 2^24-1 p (127-23) + 1/2 ulp*/
+ mpatoflt(minfltval[TFLOAT32], "-33554431p103")
+ mpatoflt(maxfltval[TFLOAT64], "18014398509481983p970") /* 2^53-1 p (1023-52) + 1/2 ulp */
+ mpatoflt(minfltval[TFLOAT64], "-18014398509481983p970")
+
+ maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
+ minfltval[TCOMPLEX64] = minfltval[TFLOAT32]
+ maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
+ minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
+
+ /* for walk to use in error messages */
+ Types[TFUNC] = functype(nil, nil, nil)
+
+ /* types used in front end */
+ // types[TNIL] got set early in lexinit
+ Types[TIDEAL] = typ(TIDEAL)
+
+ Types[TINTER] = typ(TINTER)
+
+ /* simple aliases */
+ Simtype[TMAP] = uint8(Tptr)
+
+ Simtype[TCHAN] = uint8(Tptr)
+ Simtype[TFUNC] = uint8(Tptr)
+ Simtype[TUNSAFEPTR] = uint8(Tptr)
+
+ /* pick up the backend thearch.typedefs */
+ for i = range Thearch.Typedefs {
+ s = Lookup(Thearch.Typedefs[i].Name)
+ s1 = Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
+
+ etype = Thearch.Typedefs[i].Etype
+ if etype < 0 || etype >= len(Types) {
+ Fatal("typeinit: %s bad etype", s.Name)
+ }
+ sameas = Thearch.Typedefs[i].Sameas
+ if sameas < 0 || sameas >= len(Types) {
+ Fatal("typeinit: %s bad sameas", s.Name)
+ }
+ Simtype[etype] = uint8(sameas)
+ minfltval[etype] = minfltval[sameas]
+ maxfltval[etype] = maxfltval[sameas]
+ Minintval[etype] = Minintval[sameas]
+ Maxintval[etype] = Maxintval[sameas]
+
+ t = Types[etype]
+ if t != nil {
+ Fatal("typeinit: %s already defined", s.Name)
+ }
+
+ t = typ(etype)
+ t.Sym = s1
+
+ dowidth(t)
+ Types[etype] = t
+ s1.Def = typenod(t)
+ }
+
+ Array_array = int(Rnd(0, int64(Widthptr)))
+ Array_nel = int(Rnd(int64(Array_array)+int64(Widthptr), int64(Widthint)))
+ Array_cap = int(Rnd(int64(Array_nel)+int64(Widthint), int64(Widthint)))
+ sizeof_Array = int(Rnd(int64(Array_cap)+int64(Widthint), int64(Widthptr)))
+
+	// string is the same as a slice without the cap
+ sizeof_String = int(Rnd(int64(Array_nel)+int64(Widthint), int64(Widthptr)))
+
+ dowidth(Types[TSTRING])
+ dowidth(idealstring)
+}
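
The float bounds set in typeinit above are exclusive limits: for float32, 33554431p103 is (2^25-1)*2^103, which is math.MaxFloat32 plus half a ulp, so every literal strictly inside the bound rounds to a representable value. A standalone cross-check with math/big (not the compiler's own mp arithmetic):

    package main

    import (
        "fmt"
        "math"
        "math/big"
    )

    func main() {
        bound := new(big.Float).SetMantExp(big.NewFloat(33554431), 103) // (2^25-1) * 2^103
        max := big.NewFloat(math.MaxFloat32)                            // (2^24-1) * 2^104
        halfULP := new(big.Float).SetMantExp(big.NewFloat(1), 103)      // 2^103
        fmt.Println(bound.Cmp(new(big.Float).Add(max, halfULP)) == 0)   // true
    }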
+
+/*
+ * compute total size of f's in/out arguments.
+ */
+func Argsize(t *Type) int {
+ var save Iter
+ var fp *Type
+ var w int64
+ var x int64
+
+ w = 0
+
+ fp = Structfirst(&save, Getoutarg(t))
+ for fp != nil {
+ x = fp.Width + fp.Type.Width
+ if x > w {
+ w = x
+ }
+ fp = structnext(&save)
+ }
+
+ fp = funcfirst(&save, t)
+ for fp != nil {
+ x = fp.Width + fp.Type.Width
+ if x > w {
+ w = x
+ }
+ fp = funcnext(&save)
+ }
+
+ w = (w + int64(Widthptr) - 1) &^ (int64(Widthptr) - 1)
+ if int64(int(w)) != w {
+ Fatal("argsize too big")
+ }
+ return int(w)
+}
diff --git a/src/cmd/internal/gc/array.go b/src/cmd/internal/gc/array.go
new file mode 100644
index 0000000000..04086138b3
--- /dev/null
+++ b/src/cmd/internal/gc/array.go
@@ -0,0 +1,9 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+const (
+ DEFAULTCAPACITY = 16
+)
diff --git a/src/cmd/internal/gc/bits.go b/src/cmd/internal/gc/bits.go
new file mode 100644
index 0000000000..613e15d58b
--- /dev/null
+++ b/src/cmd/internal/gc/bits.go
@@ -0,0 +1,167 @@
+// Inferno utils/cc/bits.c
+// http://code.google.com/p/inferno-os/source/browse/utils/cc/bits.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import "fmt"
+
+/*
+Bits
+bor(Bits a, Bits b)
+{
+ Bits c;
+ int i;
+
+ for(i=0; i<BITS; i++)
+ c.b[i] = a.b[i] | b.b[i];
+ return c;
+}
+
+Bits
+band(Bits a, Bits b)
+{
+ Bits c;
+ int i;
+
+ for(i=0; i<BITS; i++)
+ c.b[i] = a.b[i] & b.b[i];
+ return c;
+}
+
+Bits
+bnot(Bits a)
+{
+ Bits c;
+ int i;
+
+ for(i=0; i<BITS; i++)
+ c.b[i] = ~a.b[i];
+ return c;
+}
+*/
+func bany(a *Bits) int {
+ var i int
+
+ for i = 0; i < BITS; i++ {
+ if a.b[i] != 0 {
+ return 1
+ }
+ }
+ return 0
+}
+
+/*
+int
+beq(Bits a, Bits b)
+{
+ int i;
+
+ for(i=0; i<BITS; i++)
+ if(a.b[i] != b.b[i])
+ return 0;
+ return 1;
+}
+*/
+func bnum(a Bits) int {
+ var i int
+ var b uint64
+
+ for i = 0; i < BITS; i++ {
+ b = a.b[i]
+ if b != 0 {
+ return 64*i + Bitno(b)
+ }
+ }
+
+ Fatal("bad in bnum")
+ return 0
+}
+
+func blsh(n uint) Bits {
+ var c Bits
+
+ c = zbits
+ c.b[n/64] = 1 << (n % 64)
+ return c
+}
+
+func btest(a *Bits, n uint) int {
+ return bool2int(a.b[n/64]&(1<<(n%64)) != 0)
+}
+
+func biset(a *Bits, n uint) {
+ a.b[n/64] |= 1 << (n % 64)
+}
+
+func biclr(a *Bits, n uint) {
+ a.b[n/64] &^= (1 << (n % 64))
+}
+
+func Bitno(b uint64) int {
+ var i int
+
+ for i = 0; i < 64; i++ {
+ if b&(1<<uint(i)) != 0 {
+ return i
+ }
+ }
+ Fatal("bad in bitno")
+ return 0
+}
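
Bitno is a linear scan for the index of the lowest set bit (callers guarantee b != 0). In Go versions that postdate this commit, the same answer comes from math/bits; shown here only as a cross-check:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        b := uint64(0x28)                    // bits 3 and 5 set
        fmt.Println(bits.TrailingZeros64(b)) // 3, matching Bitno(b)
    }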
+
+func Qconv(bits Bits, flag int) string {
+ var fp string
+
+ var i int
+ var first int
+
+ first = 1
+
+ for bany(&bits) != 0 {
+ i = bnum(bits)
+ if first != 0 {
+ first = 0
+ } else {
+ fp += fmt.Sprintf(" ")
+ }
+ if var_[i].node == nil || var_[i].node.Sym == nil {
+ fp += fmt.Sprintf("$%d", i)
+ } else {
+ fp += fmt.Sprintf("%s(%d)", var_[i].node.Sym.Name, i)
+ if var_[i].offset != 0 {
+ fp += fmt.Sprintf("%+d", int64(var_[i].offset))
+ }
+ }
+
+ biclr(&bits, uint(i))
+ }
+
+ return fp
+}
diff --git a/src/cmd/internal/gc/builtin.go b/src/cmd/internal/gc/builtin.go
new file mode 100644
index 0000000000..8742d4bb37
--- /dev/null
+++ b/src/cmd/internal/gc/builtin.go
@@ -0,0 +1,6 @@
+package gc
+
+// AUTO-GENERATED by mkbuiltin; DO NOT EDIT
+var runtimeimport string = "package runtime\n" + "import runtime \"runtime\"\n" + "func @\"\".newobject (@\"\".typ·2 *byte) (? *any)\n" + "func @\"\".panicindex ()\n" + "func @\"\".panicslice ()\n" + "func @\"\".panicdivide ()\n" + "func @\"\".throwreturn ()\n" + "func @\"\".throwinit ()\n" + "func @\"\".panicwrap (? string, ? string, ? string)\n" + "func @\"\".gopanic (? interface {})\n" + "func @\"\".gorecover (? *int32) (? interface {})\n" + "func @\"\".printbool (? bool)\n" + "func @\"\".printfloat (? float64)\n" + "func @\"\".printint (? int64)\n" + "func @\"\".printhex (? uint64)\n" + "func @\"\".printuint (? uint64)\n" + "func @\"\".printcomplex (? complex128)\n" + "func @\"\".printstring (? string)\n" + "func @\"\".printpointer (? any)\n" + "func @\"\".printiface (? any)\n" + "func @\"\".printeface (? any)\n" + "func @\"\".printslice (? any)\n" + "func @\"\".printnl ()\n" + "func @\"\".printsp ()\n" + "func @\"\".printlock ()\n" + "func @\"\".printunlock ()\n" + "func @\"\".concatstring2 (? *[32]byte, ? string, ? string) (? string)\n" + "func @\"\".concatstring3 (? *[32]byte, ? string, ? string, ? string) (? string)\n" + "func @\"\".concatstring4 (? *[32]byte, ? string, ? string, ? string, ? string) (? string)\n" + "func @\"\".concatstring5 (? *[32]byte, ? string, ? string, ? string, ? string, ? string) (? string)\n" + "func @\"\".concatstrings (? *[32]byte, ? []string) (? string)\n" + "func @\"\".cmpstring (? string, ? string) (? int)\n" + "func @\"\".eqstring (? string, ? string) (? bool)\n" + "func @\"\".intstring (? *[4]byte, ? int64) (? string)\n" + "func @\"\".slicebytetostring (? *[32]byte, ? []byte) (? string)\n" + "func @\"\".slicebytetostringtmp (? []byte) (? string)\n" + "func @\"\".slicerunetostring (? *[32]byte, ? []rune) (? string)\n" + "func @\"\".stringtoslicebyte (? *[32]byte, ? string) (? []byte)\n" + "func @\"\".stringtoslicebytetmp (? string) (? []byte)\n" + "func @\"\".stringtoslicerune (? *[32]rune, ? string) (? []rune)\n" + "func @\"\".stringiter (? string, ? int) (? int)\n" + "func @\"\".stringiter2 (? string, ? int) (@\"\".retk·1 int, @\"\".retv·2 rune)\n" + "func @\"\".slicecopy (@\"\".to·2 any, @\"\".fr·3 any, @\"\".wid·4 uintptr) (? int)\n" + "func @\"\".slicestringcopy (@\"\".to·2 any, @\"\".fr·3 any) (? int)\n" + "func @\"\".typ2Itab (@\"\".typ·2 *byte, @\"\".typ2·3 *byte, @\"\".cache·4 **byte) (@\"\".ret·1 *byte)\n" + "func @\"\".convI2E (@\"\".elem·2 any) (@\"\".ret·1 any)\n" + "func @\"\".convI2I (@\"\".typ·2 *byte, @\"\".elem·3 any) (@\"\".ret·1 any)\n" + "func @\"\".convT2E (@\"\".typ·2 *byte, @\"\".elem·3 *any) (@\"\".ret·1 any)\n" + "func @\"\".convT2I (@\"\".typ·2 *byte, @\"\".typ2·3 *byte, @\"\".cache·4 **byte, @\"\".elem·5 *any) (@\"\".ret·1 any)\n" + "func @\"\".assertE2E (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertE2E2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + "func @\"\".assertE2I (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertE2I2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + "func @\"\".assertE2T (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertE2T2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + "func @\"\".assertI2E (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertI2E2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + "func @\"\".assertI2I (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertI2I2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + "func @\"\".assertI2T (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertI2T2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + "func @\"\".ifaceeq (@\"\".i1·2 any, @\"\".i2·3 any) (@\"\".ret·1 bool)\n" + "func @\"\".efaceeq (@\"\".i1·2 any, @\"\".i2·3 any) (@\"\".ret·1 bool)\n" + "func @\"\".ifacethash (@\"\".i1·2 any) (@\"\".ret·1 uint32)\n" + "func @\"\".efacethash (@\"\".i1·2 any) (@\"\".ret·1 uint32)\n" + "func @\"\".makemap (@\"\".mapType·2 *byte, @\"\".hint·3 int64, @\"\".mapbuf·4 *any, @\"\".bucketbuf·5 *any) (@\"\".hmap·1 map[any]any)\n" + "func @\"\".mapaccess1 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 *any) (@\"\".val·1 *any)\n" + "func @\"\".mapaccess1_fast32 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + "func @\"\".mapaccess1_fast64 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + "func @\"\".mapaccess1_faststr (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + "func @\"\".mapaccess2 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 *any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapaccess2_fast32 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapaccess2_fast64 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapaccess2_faststr (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapassign1 (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".key·3 *any, @\"\".val·4 *any)\n" + "func @\"\".mapiterinit (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".hiter·3 *any)\n" + "func @\"\".mapdelete (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".key·3 *any)\n" + "func @\"\".mapiternext (@\"\".hiter·1 *any)\n" + "func @\"\".makechan (@\"\".chanType·2 *byte, @\"\".hint·3 int64) (@\"\".hchan·1 chan any)\n" + "func @\"\".chanrecv1 (@\"\".chanType·1 *byte, @\"\".hchan·2 <-chan any, @\"\".elem·3 *any)\n" + "func @\"\".chanrecv2 (@\"\".chanType·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any) (? bool)\n" + "func @\"\".chansend1 (@\"\".chanType·1 *byte, @\"\".hchan·2 chan<- any, @\"\".elem·3 *any)\n" + "func @\"\".closechan (@\"\".hchan·1 any)\n" + "func @\"\".writebarrierptr (@\"\".dst·1 *any, @\"\".src·2 any)\n" + "func @\"\".writebarrierstring (@\"\".dst·1 *any, @\"\".src·2 any)\n" + "func @\"\".writebarrierslice (@\"\".dst·1 *any, @\"\".src·2 any)\n" + "func @\"\".writebarrieriface (@\"\".dst·1 *any, @\"\".src·2 any)\n" + "func @\"\".writebarrierfat01 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat10 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat11 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat001 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat010 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat011 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat100 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat101 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat110 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat111 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0001 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0010 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0011 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0100 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0101 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0110 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0111 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1000 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1001 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1010 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1011 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1100 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1101 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1110 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1111 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".typedmemmove (@\"\".typ·1 *byte, @\"\".dst·2 *any, @\"\".src·3 *any)\n" + "func @\"\".typedslicecopy (@\"\".typ·2 *byte, @\"\".dst·3 any, @\"\".src·4 any) (? int)\n" + "func @\"\".selectnbsend (@\"\".chanType·2 *byte, @\"\".hchan·3 chan<- any, @\"\".elem·4 *any) (? bool)\n" + "func @\"\".selectnbrecv (@\"\".chanType·2 *byte, @\"\".elem·3 *any, @\"\".hchan·4 <-chan any) (? bool)\n" + "func @\"\".selectnbrecv2 (@\"\".chanType·2 *byte, @\"\".elem·3 *any, @\"\".received·4 *bool, @\"\".hchan·5 <-chan any) (? bool)\n" + "func @\"\".newselect (@\"\".sel·1 *byte, @\"\".selsize·2 int64, @\"\".size·3 int32)\n" + "func @\"\".selectsend (@\"\".sel·2 *byte, @\"\".hchan·3 chan<- any, @\"\".elem·4 *any) (@\"\".selected·1 bool)\n" + "func @\"\".selectrecv (@\"\".sel·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any) (@\"\".selected·1 bool)\n" + "func @\"\".selectrecv2 (@\"\".sel·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any, @\"\".received·5 *bool) (@\"\".selected·1 bool)\n" + "func @\"\".selectdefault (@\"\".sel·2 *byte) (@\"\".selected·1 bool)\n" + "func @\"\".selectgo (@\"\".sel·1 *byte)\n" + "func @\"\".block ()\n" + "func @\"\".makeslice (@\"\".typ·2 *byte, @\"\".nel·3 int64, @\"\".cap·4 int64) (@\"\".ary·1 []any)\n" + "func @\"\".growslice (@\"\".typ·2 *byte, @\"\".old·3 []any, @\"\".n·4 int64) (@\"\".ary·1 []any)\n" + "func @\"\".memmove (@\"\".to·1 *any, @\"\".frm·2 *any, @\"\".length·3 uintptr)\n" + "func @\"\".memclr (@\"\".ptr·1 *byte, @\"\".length·2 uintptr)\n" + "func @\"\".memequal (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n" + "func @\"\".memequal8 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + "func @\"\".memequal16 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + "func @\"\".memequal32 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + "func @\"\".memequal64 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + "func @\"\".memequal128 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + "func @\"\".int64div (? int64, ? int64) (? int64)\n" + "func @\"\".uint64div (? uint64, ? uint64) (? uint64)\n" + "func @\"\".int64mod (? int64, ? int64) (? int64)\n" + "func @\"\".uint64mod (? uint64, ? uint64) (? uint64)\n" + "func @\"\".float64toint64 (? float64) (? int64)\n" + "func @\"\".float64touint64 (? float64) (? uint64)\n" + "func @\"\".int64tofloat64 (? int64) (? float64)\n" + "func @\"\".uint64tofloat64 (? uint64) (? float64)\n" + "func @\"\".complex128div (@\"\".num·2 complex128, @\"\".den·3 complex128) (@\"\".quo·1 complex128)\n" + "func @\"\".racefuncenter (? uintptr)\n" + "func @\"\".racefuncexit ()\n" + "func @\"\".raceread (? uintptr)\n" + "func @\"\".racewrite (? uintptr)\n" + "func @\"\".racereadrange (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" + "func @\"\".racewriterange (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" + "\n" + "$$\n"
+
+var unsafeimport string = "package unsafe\n" + "import runtime \"runtime\"\n" + "type @\"\".Pointer uintptr\n" + "func @\"\".Offsetof (? any) (? uintptr)\n" + "func @\"\".Sizeof (? any) (? uintptr)\n" + "func @\"\".Alignof (? any) (? uintptr)\n" + "\n" + "$$\n"
diff --git a/src/cmd/internal/gc/bv.go b/src/cmd/internal/gc/bv.go
new file mode 100644
index 0000000000..998a1f58c3
--- /dev/null
+++ b/src/cmd/internal/gc/bv.go
@@ -0,0 +1,208 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "fmt"
+
+const (
+ WORDSIZE = 4
+ WORDBITS = 32
+ WORDMASK = WORDBITS - 1
+ WORDSHIFT = 5
+)
+
+func bvsize(n uint32) uint32 {
+ return ((n + WORDBITS - 1) / WORDBITS) * WORDSIZE
+}
+
+func bvbits(bv *Bvec) int32 {
+ return bv.n
+}
+
+func bvwords(bv *Bvec) int32 {
+ return (bv.n + WORDBITS - 1) / WORDBITS
+}
+
+func bvalloc(n int32) *Bvec {
+ return &Bvec{n, make([]uint32, bvsize(uint32(n))/4)}
+}
+
+/* difference */
+func bvandnot(dst *Bvec, src1 *Bvec, src2 *Bvec) {
+ var i int32
+ var w int32
+
+ if dst.n != src1.n || dst.n != src2.n {
+		Fatal("bvandnot: lengths %d, %d, and %d are not equal", dst.n, src1.n, src2.n)
+ }
+ i = 0
+ w = 0
+ for ; i < dst.n; (func() { i += WORDBITS; w++ })() {
+ dst.b[w] = src1.b[w] &^ src2.b[w]
+ }
+}
+
+func bvcmp(bv1 *Bvec, bv2 *Bvec) int {
+ if bv1.n != bv2.n {
+		Fatal("bvcmp: lengths %d and %d are not equal", bv1.n, bv2.n)
+ }
+ for i, x := range bv1.b {
+ if x != bv2.b[i] {
+ return 1
+ }
+ }
+ return 0
+}
+
+func bvcopy(dst *Bvec, src *Bvec) {
+ for i, x := range src.b {
+ dst.b[i] = x
+ }
+}
+
+func bvconcat(src1 *Bvec, src2 *Bvec) *Bvec {
+ var dst *Bvec
+ var i int32
+
+ dst = bvalloc(src1.n + src2.n)
+ for i = 0; i < src1.n; i++ {
+ if bvget(src1, i) != 0 {
+ bvset(dst, i)
+ }
+ }
+ for i = 0; i < src2.n; i++ {
+ if bvget(src2, i) != 0 {
+ bvset(dst, i+src1.n)
+ }
+ }
+ return dst
+}
+
+func bvget(bv *Bvec, i int32) int {
+ if i < 0 || i >= bv.n {
+ Fatal("bvget: index %d is out of bounds with length %d\n", i, bv.n)
+ }
+ return int((bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)) & 1)
+}
+
+// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
+// If there is no such index, bvnext returns -1.
+func bvnext(bv *Bvec, i int32) int {
+ var w uint32
+
+ if i >= bv.n {
+ return -1
+ }
+
+ // Jump i ahead to next word with bits.
+ if bv.b[i>>WORDSHIFT]>>uint(i&WORDMASK) == 0 {
+ i &^= WORDMASK
+ i += WORDBITS
+ for i < bv.n && bv.b[i>>WORDSHIFT] == 0 {
+ i += WORDBITS
+ }
+ }
+
+ if i >= bv.n {
+ return -1
+ }
+
+ // Find 1 bit.
+ w = bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)
+
+ for w&1 == 0 {
+ w >>= 1
+ i++
+ }
+
+ return int(i)
+}
+
+func bvisempty(bv *Bvec) int {
+ var i int32
+
+ for i = 0; i < bv.n; i += WORDBITS {
+ if bv.b[i>>WORDSHIFT] != 0 {
+ return 0
+ }
+ }
+ return 1
+}
+
+func bvnot(bv *Bvec) {
+ var i int32
+ var w int32
+
+ i = 0
+ w = 0
+ for ; i < bv.n; (func() { i += WORDBITS; w++ })() {
+ bv.b[w] = ^bv.b[w]
+ }
+}
+
+/* union */
+func bvor(dst *Bvec, src1 *Bvec, src2 *Bvec) {
+ var i int32
+ var w int32
+
+ if dst.n != src1.n || dst.n != src2.n {
+ Fatal("bvor: lengths %d, %d, and %d are not equal", dst.n, src1.n, src2.n)
+ }
+ i = 0
+ w = 0
+ for ; i < dst.n; (func() { i += WORDBITS; w++ })() {
+ dst.b[w] = src1.b[w] | src2.b[w]
+ }
+}
+
+/* intersection */
+func bvand(dst *Bvec, src1 *Bvec, src2 *Bvec) {
+ var i int32
+ var w int32
+
+ if dst.n != src1.n || dst.n != src2.n {
+		Fatal("bvand: lengths %d, %d, and %d are not equal", dst.n, src1.n, src2.n)
+ }
+ i = 0
+ w = 0
+ for ; i < dst.n; (func() { i += WORDBITS; w++ })() {
+ dst.b[w] = src1.b[w] & src2.b[w]
+ }
+}
+
+func bvprint(bv *Bvec) {
+ var i int32
+
+ fmt.Printf("#*")
+ for i = 0; i < bv.n; i++ {
+ fmt.Printf("%d", bvget(bv, i))
+ }
+}
+
+func bvreset(bv *Bvec, i int32) {
+ var mask uint32
+
+ if i < 0 || i >= bv.n {
+ Fatal("bvreset: index %d is out of bounds with length %d\n", i, bv.n)
+ }
+ mask = ^(1 << uint(i%WORDBITS))
+ bv.b[i/WORDBITS] &= mask
+}
+
+func bvresetall(bv *Bvec) {
+ for i := range bv.b {
+ bv.b[i] = 0
+ }
+}
+
+func bvset(bv *Bvec, i int32) {
+ var mask uint32
+
+ if i < 0 || i >= bv.n {
+ Fatal("bvset: index %d is out of bounds with length %d\n", i, bv.n)
+ }
+ mask = 1 << uint(i%WORDBITS)
+ bv.b[i/WORDBITS] |= mask
+}
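
All of bv.go addresses bit i as word i>>WORDSHIFT and bit i&WORDMASK within that word. A self-contained sketch of the same layout (hypothetical helpers, 32-bit words as above):

    package main

    import "fmt"

    const (
        wordBits  = 32
        wordMask  = wordBits - 1
        wordShift = 5
    )

    func set(b []uint32, i int32) { b[i>>wordShift] |= 1 << uint(i&wordMask) }

    func get(b []uint32, i int32) int {
        return int((b[i>>wordShift] >> uint(i&wordMask)) & 1)
    }

    func main() {
        b := make([]uint32, 2)              // a 64-bit vector
        set(b, 33)                          // word 1, bit 1
        fmt.Println(get(b, 33), get(b, 32)) // 1 0
    }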
diff --git a/src/cmd/internal/gc/closure.go b/src/cmd/internal/gc/closure.go
new file mode 100644
index 0000000000..5a1ae65479
--- /dev/null
+++ b/src/cmd/internal/gc/closure.go
@@ -0,0 +1,696 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+/*
+ * function literals aka closures
+ */
+func closurehdr(ntype *Node) {
+ var n *Node
+ var name *Node
+ var a *Node
+ var l *NodeList
+
+ n = Nod(OCLOSURE, nil, nil)
+ n.Ntype = ntype
+ n.Funcdepth = Funcdepth
+
+ funchdr(n)
+
+ // steal ntype's argument names and
+ // leave a fresh copy in their place.
+ // references to these variables need to
+ // refer to the variables in the external
+ // function declared below; see walkclosure.
+ n.List = ntype.List
+
+ n.Rlist = ntype.Rlist
+ ntype.List = nil
+ ntype.Rlist = nil
+ for l = n.List; l != nil; l = l.Next {
+ name = l.N.Left
+ if name != nil {
+ name = newname(name.Sym)
+ }
+ a = Nod(ODCLFIELD, name, l.N.Right)
+ a.Isddd = l.N.Isddd
+ if name != nil {
+ name.Isddd = a.Isddd
+ }
+ ntype.List = list(ntype.List, a)
+ }
+
+ for l = n.Rlist; l != nil; l = l.Next {
+ name = l.N.Left
+ if name != nil {
+ name = newname(name.Sym)
+ }
+ ntype.Rlist = list(ntype.Rlist, Nod(ODCLFIELD, name, l.N.Right))
+ }
+}
+
+func closurebody(body *NodeList) *Node {
+ var func_ *Node
+ var v *Node
+ var l *NodeList
+
+ if body == nil {
+ body = list1(Nod(OEMPTY, nil, nil))
+ }
+
+ func_ = Curfn
+ func_.Nbody = body
+ func_.Endlineno = lineno
+ funcbody(func_)
+
+ // closure-specific variables are hanging off the
+ // ordinary ones in the symbol table; see oldname.
+ // unhook them.
+ // make the list of pointers for the closure call.
+ for l = func_.Cvars; l != nil; l = l.Next {
+ v = l.N
+ v.Closure.Closure = v.Outer
+ v.Outerexpr = oldname(v.Sym)
+ }
+
+ return func_
+}
+
+func typecheckclosure(func_ *Node, top int) {
+ var oldfn *Node
+ var n *Node
+ var l *NodeList
+ var olddd int
+
+ for l = func_.Cvars; l != nil; l = l.Next {
+ n = l.N.Closure
+ if !(n.Captured != 0) {
+ n.Captured = 1
+ if n.Decldepth == 0 {
+ Fatal("typecheckclosure: var %v does not have decldepth assigned", Nconv(n, obj.FmtShort))
+ }
+
+ // Ignore assignments to the variable in straightline code
+ // preceding the first capturing by a closure.
+ if n.Decldepth == decldepth {
+ n.Assigned = 0
+ }
+ }
+ }
+
+ for l = func_.Dcl; l != nil; l = l.Next {
+ if l.N.Op == ONAME && (l.N.Class == PPARAM || l.N.Class == PPARAMOUT) {
+ l.N.Decldepth = 1
+ }
+ }
+
+ oldfn = Curfn
+ typecheck(&func_.Ntype, Etype)
+ func_.Type = func_.Ntype.Type
+ func_.Top = top
+
+ // Type check the body now, but only if we're inside a function.
+ // At top level (in a variable initialization: curfn==nil) we're not
+ // ready to type check code yet; we'll check it later, because the
+ // underlying closure function we create is added to xtop.
+ if Curfn != nil && func_.Type != nil {
+ Curfn = func_
+ olddd = decldepth
+ decldepth = 1
+ typechecklist(func_.Nbody, Etop)
+ decldepth = olddd
+ Curfn = oldfn
+ }
+
+ // Create top-level function
+ xtop = list(xtop, makeclosure(func_))
+}
+
+var makeclosure_closgen int
+
+func makeclosure(func_ *Node) *Node {
+ var xtype *Node
+ var xfunc *Node
+
+ /*
+ * wrap body in external function
+ * that begins by reading closure parameters.
+ */
+ xtype = Nod(OTFUNC, nil, nil)
+
+ xtype.List = func_.List
+ xtype.Rlist = func_.Rlist
+
+ // create the function
+ xfunc = Nod(ODCLFUNC, nil, nil)
+
+ makeclosure_closgen++
+ namebuf = fmt.Sprintf("func·%.3d", makeclosure_closgen)
+ xfunc.Nname = newname(Lookup(namebuf))
+ xfunc.Nname.Sym.Flags |= SymExported // disable export
+ xfunc.Nname.Ntype = xtype
+ xfunc.Nname.Defn = xfunc
+ declare(xfunc.Nname, PFUNC)
+ xfunc.Nname.Funcdepth = func_.Funcdepth
+ xfunc.Funcdepth = func_.Funcdepth
+ xfunc.Endlineno = func_.Endlineno
+
+ xfunc.Nbody = func_.Nbody
+ xfunc.Dcl = concat(func_.Dcl, xfunc.Dcl)
+ if xfunc.Nbody == nil {
+ Fatal("empty body - won't generate any code")
+ }
+ typecheck(&xfunc, Etop)
+
+ xfunc.Closure = func_
+ func_.Closure = xfunc
+
+ func_.Nbody = nil
+ func_.List = nil
+ func_.Rlist = nil
+
+ return xfunc
+}
+
+// capturevars is called in a separate phase after all typechecking is done.
+// It decides whether each variable captured by a closure should be captured
+// by value or by reference.
+// We use value capturing for values <= 128 bytes that are never reassigned
+// after capturing (effectively constant).
+func capturevars(xfunc *Node) {
+ var func_ *Node
+ var v *Node
+ var outer *Node
+ var l *NodeList
+ var lno int
+
+ lno = int(lineno)
+ lineno = xfunc.Lineno
+
+ func_ = xfunc.Closure
+ func_.Enter = nil
+ for l = func_.Cvars; l != nil; l = l.Next {
+ v = l.N
+ if v.Type == nil {
+ // if v->type is nil, it means v looked like it was
+ // going to be used in the closure but wasn't.
+ // this happens because when parsing a, b, c := f()
+ // the a, b, c gets parsed as references to older
+ // a, b, c before the parser figures out this is a
+ // declaration.
+ v.Op = OXXX
+
+ continue
+ }
+
+ // type check the & of closed variables outside the closure,
+ // so that the outer frame also grabs them and knows they escape.
+ dowidth(v.Type)
+
+ outer = v.Outerexpr
+ v.Outerexpr = nil
+
+ // out parameters will be assigned to implicitly upon return.
+ if outer.Class != PPARAMOUT && !(v.Closure.Addrtaken != 0) && !(v.Closure.Assigned != 0) && v.Type.Width <= 128 {
+ v.Byval = 1
+ } else {
+ v.Closure.Addrtaken = 1
+ outer = Nod(OADDR, outer, nil)
+ }
+
+ if Debug['m'] > 1 {
+ var name *Sym
+ var how string
+ name = nil
+ if v.Curfn != nil && v.Curfn.Nname != nil {
+ name = v.Curfn.Nname.Sym
+ }
+ how = "ref"
+ if v.Byval != 0 {
+ how = "value"
+ }
+ Warnl(int(v.Lineno), "%v capturing by %s: %v (addr=%d assign=%d width=%d)", Sconv(name, 0), how, Sconv(v.Sym, 0), v.Closure.Addrtaken, v.Closure.Assigned, int32(v.Type.Width))
+ }
+
+ typecheck(&outer, Erv)
+ func_.Enter = list(func_.Enter, outer)
+ }
+
+ lineno = int32(lno)
+}
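
The by-value/by-reference decision is invisible at the source level, but the conditions are easy to restate with ordinary Go (an illustration, not compiler code): v below is small and never reassigned after capture, so it is a by-value candidate; w is mutated inside a closure, so it must be captured by reference for the mutation to be shared.

    package sketch

    func counters() (read func() int, bump func()) {
        v := 1 // effectively constant once captured
        w := 0 // reassigned by bump: must be shared via reference
        read = func() int { return v + w }
        bump = func() { w++ }
        return
    }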
+
+// transformclosure is called in a separate phase after escape analysis.
+// It transforms closure bodies to properly reference captured variables.
+func transformclosure(xfunc *Node) {
+ var func_ *Node
+ var cv *Node
+ var addr *Node
+ var v *Node
+ var f *Node
+ var l *NodeList
+ var body *NodeList
+ var param **Type
+ var fld *Type
+ var offset int64
+ var lno int
+ var nvar int
+
+ lno = int(lineno)
+ lineno = xfunc.Lineno
+ func_ = xfunc.Closure
+
+ if func_.Top&Ecall != 0 {
+ // If the closure is directly called, we transform it to a plain function call
+ // with variables passed as args. This avoids allocation of a closure object.
+ // Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
+ // will complete the transformation later.
+ // For illustration, the following closure:
+ // func(a int) {
+ // println(byval)
+ // byref++
+ // }(42)
+ // becomes:
+ // func(a int, byval int, &byref *int) {
+ // println(byval)
+ // (*&byref)++
+ // }(42, byval, &byref)
+
+ // f is ONAME of the actual function.
+ f = xfunc.Nname
+
+ // Get pointer to input arguments and rewind to the end.
+ // We are going to append captured variables to input args.
+ param = &getinargx(f.Type).Type
+
+ for ; *param != nil; param = &(*param).Down {
+ }
+ for l = func_.Cvars; l != nil; l = l.Next {
+ v = l.N
+ if v.Op == OXXX {
+ continue
+ }
+ fld = typ(TFIELD)
+ fld.Funarg = 1
+ if v.Byval != 0 {
+ // If v is captured by value, we merely downgrade it to PPARAM.
+ v.Class = PPARAM
+
+ v.Ullman = 1
+ fld.Nname = v
+ } else {
+ // If v of type T is captured by reference,
+ // we introduce function param &v *T
+ // and v remains PPARAMREF with &v heapaddr
+ // (accesses will implicitly deref &v).
+ namebuf = fmt.Sprintf("&%s", v.Sym.Name)
+
+ addr = newname(Lookup(namebuf))
+ addr.Type = Ptrto(v.Type)
+ addr.Class = PPARAM
+ v.Heapaddr = addr
+ fld.Nname = addr
+ }
+
+ fld.Type = fld.Nname.Type
+ fld.Sym = fld.Nname.Sym
+
+ // Declare the new param and append it to input arguments.
+ xfunc.Dcl = list(xfunc.Dcl, fld.Nname)
+
+ *param = fld
+ param = &fld.Down
+ }
+
+ // Recalculate param offsets.
+ if f.Type.Width > 0 {
+ Fatal("transformclosure: width is already calculated")
+ }
+ dowidth(f.Type)
+ xfunc.Type = f.Type // update type of ODCLFUNC
+ } else {
+ // The closure is not called, so it is going to stay as closure.
+ nvar = 0
+
+ body = nil
+ offset = int64(Widthptr)
+ for l = func_.Cvars; l != nil; l = l.Next {
+ v = l.N
+ if v.Op == OXXX {
+ continue
+ }
+ nvar++
+
+ // cv refers to the field inside of closure OSTRUCTLIT.
+ cv = Nod(OCLOSUREVAR, nil, nil)
+
+ cv.Type = v.Type
+ if !(v.Byval != 0) {
+ cv.Type = Ptrto(v.Type)
+ }
+ offset = Rnd(offset, int64(cv.Type.Align))
+ cv.Xoffset = offset
+ offset += cv.Type.Width
+
+ if v.Byval != 0 && v.Type.Width <= int64(2*Widthptr) && Thearch.Thechar == '6' {
+ // If it is a small variable captured by value, downgrade it to PAUTO.
+ // This optimization is currently enabled only for amd64, see:
+ // https://github.com/golang/go/issues/9865
+ v.Class = PAUTO
+
+ v.Ullman = 1
+ xfunc.Dcl = list(xfunc.Dcl, v)
+ body = list(body, Nod(OAS, v, cv))
+ } else {
+ // Declare variable holding addresses taken from closure
+ // and initialize in entry prologue.
+ namebuf = fmt.Sprintf("&%s", v.Sym.Name)
+
+ addr = newname(Lookup(namebuf))
+ addr.Ntype = Nod(OIND, typenod(v.Type), nil)
+ addr.Class = PAUTO
+ addr.Used = 1
+ addr.Curfn = xfunc
+ xfunc.Dcl = list(xfunc.Dcl, addr)
+ v.Heapaddr = addr
+ if v.Byval != 0 {
+ cv = Nod(OADDR, cv, nil)
+ }
+ body = list(body, Nod(OAS, addr, cv))
+ }
+ }
+
+ typechecklist(body, Etop)
+ walkstmtlist(body)
+ xfunc.Enter = body
+ xfunc.Needctxt = uint8(bool2int(nvar > 0))
+ }
+
+ lineno = int32(lno)
+}
+
+func walkclosure(func_ *Node, init **NodeList) *Node {
+ var clos *Node
+ var typ *Node
+ var typ1 *Node
+ var v *Node
+ var l *NodeList
+
+ // If no closure vars, don't bother wrapping.
+ if func_.Cvars == nil {
+ return func_.Closure.Nname
+ }
+
+ // Create closure in the form of a composite literal.
+ // supposing the closure captures an int i and a string s
+ // and has one float64 argument and no results,
+ // the generated code looks like:
+ //
+ // clos = &struct{F uintptr; A0 *int; A1 *string}{func·001, &i, &s}
+ //
+ // The use of the struct provides type information to the garbage
+ // collector so that it can walk the closure. We could use (in this case)
+ // [3]unsafe.Pointer instead, but that would leave the gc in the dark.
+ // The information appears in the binary in the form of type descriptors;
+ // the struct is unnamed so that closures in multiple packages with the
+ // same struct type can share the descriptor.
+
+ typ = Nod(OTSTRUCT, nil, nil)
+
+ typ.List = list1(Nod(ODCLFIELD, newname(Lookup("F")), typenod(Types[TUINTPTR])))
+ for l = func_.Cvars; l != nil; l = l.Next {
+ v = l.N
+ if v.Op == OXXX {
+ continue
+ }
+ typ1 = typenod(v.Type)
+ if !(v.Byval != 0) {
+ typ1 = Nod(OIND, typ1, nil)
+ }
+ typ.List = list(typ.List, Nod(ODCLFIELD, newname(v.Sym), typ1))
+ }
+
+ clos = Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
+ clos.Esc = func_.Esc
+ clos.Right.Implicit = 1
+ clos.List = concat(list1(Nod(OCFUNC, func_.Closure.Nname, nil)), func_.Enter)
+
+ // Force type conversion from *struct to the func type.
+ clos = Nod(OCONVNOP, clos, nil)
+
+ clos.Type = func_.Type
+
+ typecheck(&clos, Erv)
+
+ // typecheck will insert a PTRLIT node under CONVNOP,
+ // tag it with escape analysis result.
+ clos.Left.Esc = func_.Esc
+
+ // non-escaping temp to use, if any.
+ // orderexpr did not compute the type; fill it in now.
+ if func_.Alloc != nil {
+ func_.Alloc.Type = clos.Left.Left.Type
+ func_.Alloc.Orig.Type = func_.Alloc.Type
+ clos.Left.Right = func_.Alloc
+ func_.Alloc = nil
+ }
+
+ walkexpr(&clos, init)
+
+ return clos
+}
+
+func typecheckpartialcall(fn *Node, sym *Node) {
+ switch fn.Op {
+ case ODOTINTER,
+ ODOTMETH:
+ break
+
+ default:
+ Fatal("invalid typecheckpartialcall")
+ }
+
+ // Create top-level function.
+ fn.Nname = makepartialcall(fn, fn.Type, sym)
+
+ fn.Right = sym
+ fn.Op = OCALLPART
+ fn.Type = fn.Nname.Type
+}
+
+var makepartialcall_gopkg *Pkg
+
+func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
+ var ptr *Node
+ var n *Node
+ var fld *Node
+ var call *Node
+ var xtype *Node
+ var xfunc *Node
+ var cv *Node
+ var savecurfn *Node
+ var rcvrtype *Type
+ var basetype *Type
+ var t *Type
+ var body *NodeList
+ var l *NodeList
+ var callargs *NodeList
+ var retargs *NodeList
+ var p string
+ var sym *Sym
+ var spkg *Pkg
+ var i int
+ var ddd int
+
+ // TODO: names are not right
+ rcvrtype = fn.Left.Type
+
+ if exportname(meth.Sym.Name) {
+ p = fmt.Sprintf("%v.%s·fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), meth.Sym.Name)
+ } else {
+ p = fmt.Sprintf("%v.(%v)·fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), Sconv(meth.Sym, obj.FmtLeft))
+ }
+ basetype = rcvrtype
+ if Isptr[rcvrtype.Etype] != 0 {
+ basetype = basetype.Type
+ }
+ if basetype.Etype != TINTER && basetype.Sym == nil {
+ Fatal("missing base type for %v", Tconv(rcvrtype, 0))
+ }
+
+ spkg = nil
+ if basetype.Sym != nil {
+ spkg = basetype.Sym.Pkg
+ }
+ if spkg == nil {
+ if makepartialcall_gopkg == nil {
+ makepartialcall_gopkg = mkpkg(newstrlit("go"))
+ }
+ spkg = makepartialcall_gopkg
+ }
+
+ sym = Pkglookup(p, spkg)
+
+ if sym.Flags&SymUniq != 0 {
+ return sym.Def
+ }
+ sym.Flags |= SymUniq
+
+ savecurfn = Curfn
+ Curfn = nil
+
+ xtype = Nod(OTFUNC, nil, nil)
+ i = 0
+ l = nil
+ callargs = nil
+ ddd = 0
+ xfunc = Nod(ODCLFUNC, nil, nil)
+ Curfn = xfunc
+ for t = getinargx(t0).Type; t != nil; t = t.Down {
+ namebuf = fmt.Sprintf("a%d", i)
+ i++
+ n = newname(Lookup(namebuf))
+ n.Class = PPARAM
+ xfunc.Dcl = list(xfunc.Dcl, n)
+ callargs = list(callargs, n)
+ fld = Nod(ODCLFIELD, n, typenod(t.Type))
+ if t.Isddd != 0 {
+ fld.Isddd = 1
+ ddd = 1
+ }
+
+ l = list(l, fld)
+ }
+
+ xtype.List = l
+ i = 0
+ l = nil
+ retargs = nil
+ for t = getoutargx(t0).Type; t != nil; t = t.Down {
+ namebuf = fmt.Sprintf("r%d", i)
+ i++
+ n = newname(Lookup(namebuf))
+ n.Class = PPARAMOUT
+ xfunc.Dcl = list(xfunc.Dcl, n)
+ retargs = list(retargs, n)
+ l = list(l, Nod(ODCLFIELD, n, typenod(t.Type)))
+ }
+
+ xtype.Rlist = l
+
+ xfunc.Dupok = 1
+ xfunc.Nname = newname(sym)
+ xfunc.Nname.Sym.Flags |= SymExported // disable export
+ xfunc.Nname.Ntype = xtype
+ xfunc.Nname.Defn = xfunc
+ declare(xfunc.Nname, PFUNC)
+
+ // Declare and initialize variable holding receiver.
+ body = nil
+
+ xfunc.Needctxt = 1
+ cv = Nod(OCLOSUREVAR, nil, nil)
+ cv.Xoffset = int64(Widthptr)
+ cv.Type = rcvrtype
+ if int(cv.Type.Align) > Widthptr {
+ cv.Xoffset = int64(cv.Type.Align)
+ }
+ ptr = Nod(ONAME, nil, nil)
+ ptr.Sym = Lookup("rcvr")
+ ptr.Class = PAUTO
+ ptr.Addable = 1
+ ptr.Ullman = 1
+ ptr.Used = 1
+ ptr.Curfn = xfunc
+ xfunc.Dcl = list(xfunc.Dcl, ptr)
+ if Isptr[rcvrtype.Etype] != 0 || Isinter(rcvrtype) != 0 {
+ ptr.Ntype = typenod(rcvrtype)
+ body = list(body, Nod(OAS, ptr, cv))
+ } else {
+ ptr.Ntype = typenod(Ptrto(rcvrtype))
+ body = list(body, Nod(OAS, ptr, Nod(OADDR, cv, nil)))
+ }
+
+ call = Nod(OCALL, Nod(OXDOT, ptr, meth), nil)
+ call.List = callargs
+ call.Isddd = uint8(ddd)
+ if t0.Outtuple == 0 {
+ body = list(body, call)
+ } else {
+ n = Nod(OAS2, nil, nil)
+ n.List = retargs
+ n.Rlist = list1(call)
+ body = list(body, n)
+ n = Nod(ORETURN, nil, nil)
+ body = list(body, n)
+ }
+
+ xfunc.Nbody = body
+
+ typecheck(&xfunc, Etop)
+ sym.Def = xfunc
+ xtop = list(xtop, xfunc)
+ Curfn = savecurfn
+
+ return xfunc
+}
+
+func walkpartialcall(n *Node, init **NodeList) *Node {
+ var clos *Node
+ var typ *Node
+
+ // Create closure in the form of a composite literal.
+ // For x.M with receiver (x) type T, the generated code looks like:
+ //
+ // clos = &struct{F uintptr; R T}{M.T·f, x}
+ //
+ // Like walkclosure above.
+
+ if Isinter(n.Left.Type) != 0 {
+ // Trigger panic for method on nil interface now.
+ // Otherwise it happens in the wrapper and is confusing.
+ n.Left = cheapexpr(n.Left, init)
+
+ checknil(n.Left, init)
+ }
+
+ typ = Nod(OTSTRUCT, nil, nil)
+ typ.List = list1(Nod(ODCLFIELD, newname(Lookup("F")), typenod(Types[TUINTPTR])))
+ typ.List = list(typ.List, Nod(ODCLFIELD, newname(Lookup("R")), typenod(n.Left.Type)))
+
+ clos = Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
+ clos.Esc = n.Esc
+ clos.Right.Implicit = 1
+ clos.List = list1(Nod(OCFUNC, n.Nname.Nname, nil))
+ clos.List = list(clos.List, n.Left)
+
+ // Force type conversion from *struct to the func type.
+ clos = Nod(OCONVNOP, clos, nil)
+
+ clos.Type = n.Type
+
+ typecheck(&clos, Erv)
+
+ // typecheck will insert a PTRLIT node under CONVNOP,
+ // tag it with escape analysis result.
+ clos.Left.Esc = n.Esc
+
+ // non-escaping temp to use, if any.
+ // orderexpr did not compute the type; fill it in now.
+ if n.Alloc != nil {
+ n.Alloc.Type = clos.Left.Left.Type
+ n.Alloc.Orig.Type = n.Alloc.Type
+ clos.Left.Right = n.Alloc
+ n.Alloc = nil
+ }
+
+ walkexpr(&clos, init)
+
+ return clos
+}
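+
+// A sketch of the generated code (illustrative, assuming a type T with
+// method M): for a method value
+//
+//	var t T
+//	f := t.M
+//
+// makepartialcall above emits a wrapper named along the lines of
+// "T.M·fm" that loads the receiver out of the closure record and calls
+// the method, and walkpartialcall rewrites t.M into
+//
+//	&struct{ F uintptr; R T }{T.M·fm, t}
+//
+// converted via OCONVNOP to the func type of f.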
diff --git a/src/cmd/internal/gc/const.go b/src/cmd/internal/gc/const.go
new file mode 100644
index 0000000000..c8c244b216
--- /dev/null
+++ b/src/cmd/internal/gc/const.go
@@ -0,0 +1,1764 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+/*
+ * truncate float literal oldv to 32-bit or 64-bit precision
+ * according to type; return truncated value.
+ */
+func truncfltlit(oldv *Mpflt, t *Type) *Mpflt {
+ var d float64
+ var fv *Mpflt
+ var v Val
+
+ if t == nil {
+ return oldv
+ }
+
+ v = Val{}
+ v.Ctype = CTFLT
+ v.U.Fval = oldv
+ overflow(v, t)
+
+ fv = new(Mpflt)
+ *fv = *oldv
+
+	// convert the arbitrary-precision floating-point literal
+	// to limited precision (float64 or float32)
+ switch t.Etype {
+ case TFLOAT64:
+ d = mpgetflt(fv)
+ Mpmovecflt(fv, d)
+
+ case TFLOAT32:
+ d = mpgetflt32(fv)
+ Mpmovecflt(fv, d)
+ }
+
+ return fv
+}
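+
+// A minimal worked example (illustrative): in
+//
+//	var f float32 = 0.1
+//
+// the literal 0.1 is carried at full Mpflt precision while untyped;
+// truncfltlit rounds it to the nearest float32 once it takes type
+// float32, so f holds 0.100000001490116119384765625 rather than the
+// exact decimal 0.1.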
+
+/*
+ * convert n, if literal, to type t.
+ * implicit conversion.
+ */
+func Convlit(np **Node, t *Type) {
+ convlit1(np, t, 0)
+}
+
+/*
+ * convert n, if literal, to type t.
+ * return a new node if necessary
+ * (if n is a named constant, can't edit n->type directly).
+ */
+func convlit1(np **Node, t *Type, explicit int) {
+ var ct int
+ var et int
+ var n *Node
+ var nn *Node
+
+ n = *np
+ if n == nil || t == nil || n.Type == nil || isideal(t) != 0 || n.Type == t {
+ return
+ }
+ if !(explicit != 0) && !(isideal(n.Type) != 0) {
+ return
+ }
+
+ if n.Op == OLITERAL {
+ nn = Nod(OXXX, nil, nil)
+ *nn = *n
+ n = nn
+ *np = n
+ }
+
+ switch n.Op {
+ default:
+ if n.Type == idealbool {
+ if t.Etype == TBOOL {
+ n.Type = t
+ } else {
+ n.Type = Types[TBOOL]
+ }
+ }
+
+ if n.Type.Etype == TIDEAL {
+ Convlit(&n.Left, t)
+ Convlit(&n.Right, t)
+ n.Type = t
+ }
+
+ return
+
+ // target is invalid type for a constant? leave alone.
+ case OLITERAL:
+ if !(okforconst[t.Etype] != 0) && n.Type.Etype != TNIL {
+ defaultlit(&n, nil)
+ *np = n
+ return
+ }
+
+ case OLSH,
+ ORSH:
+ convlit1(&n.Left, t, bool2int(explicit != 0 && isideal(n.Left.Type) != 0))
+ t = n.Left.Type
+ if t != nil && t.Etype == TIDEAL && n.Val.Ctype != CTINT {
+ n.Val = toint(n.Val)
+ }
+ if t != nil && !(Isint[t.Etype] != 0) {
+ Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
+ t = nil
+ }
+
+ n.Type = t
+ return
+
+ case OCOMPLEX:
+ if n.Type.Etype == TIDEAL {
+ switch t.Etype {
+ // If trying to convert to non-complex type,
+ // leave as complex128 and let typechecker complain.
+ default:
+ t = Types[TCOMPLEX128]
+ fallthrough
+ case TCOMPLEX128:
+ n.Type = t
+
+ Convlit(&n.Left, Types[TFLOAT64])
+ Convlit(&n.Right, Types[TFLOAT64])
+
+ case TCOMPLEX64:
+ n.Type = t
+ Convlit(&n.Left, Types[TFLOAT32])
+ Convlit(&n.Right, Types[TFLOAT32])
+ }
+ }
+
+ return
+ }
+
+	// avoid repeated calculations, errors
+ if Eqtype(n.Type, t) {
+ return
+ }
+
+ ct = consttype(n)
+ if ct < 0 {
+ goto bad
+ }
+
+ et = int(t.Etype)
+ if et == TINTER {
+ if ct == CTNIL && n.Type == Types[TNIL] {
+ n.Type = t
+ return
+ }
+
+ defaultlit(np, nil)
+ return
+ }
+
+ switch ct {
+ default:
+ goto bad
+
+ case CTNIL:
+ switch et {
+ default:
+ n.Type = nil
+ goto bad
+
+ // let normal conversion code handle it
+ case TSTRING:
+ return
+
+ case TARRAY:
+ if !(Isslice(t) != 0) {
+ goto bad
+ }
+
+ case TPTR32,
+ TPTR64,
+ TINTER,
+ TMAP,
+ TCHAN,
+ TFUNC,
+ TUNSAFEPTR:
+ break
+
+ // A nil literal may be converted to uintptr
+ // if it is an unsafe.Pointer
+ case TUINTPTR:
+ if n.Type.Etype == TUNSAFEPTR {
+ n.Val.U.Xval = new(Mpint)
+ Mpmovecfix(n.Val.U.Xval, 0)
+ n.Val.Ctype = CTINT
+ } else {
+ goto bad
+ }
+ }
+
+ case CTSTR,
+ CTBOOL:
+ if et != int(n.Type.Etype) {
+ goto bad
+ }
+
+ case CTINT,
+ CTRUNE,
+ CTFLT,
+ CTCPLX:
+ ct = int(n.Val.Ctype)
+ if Isint[et] != 0 {
+ switch ct {
+ default:
+ goto bad
+
+ case CTCPLX,
+ CTFLT,
+ CTRUNE:
+ n.Val = toint(n.Val)
+ fallthrough
+ case CTINT:
+ overflow(n.Val, t)
+ }
+ } else if Isfloat[et] != 0 {
+ switch ct {
+ default:
+ goto bad
+
+ case CTCPLX,
+ CTINT,
+ CTRUNE:
+ n.Val = toflt(n.Val)
+ fallthrough
+ case CTFLT:
+ n.Val.U.Fval = truncfltlit(n.Val.U.Fval, t)
+ }
+ } else if Iscomplex[et] != 0 {
+ switch ct {
+ default:
+ goto bad
+
+ case CTFLT,
+ CTINT,
+ CTRUNE:
+ n.Val = tocplx(n.Val)
+
+ case CTCPLX:
+ overflow(n.Val, t)
+ }
+ } else if et == TSTRING && (ct == CTINT || ct == CTRUNE) && explicit != 0 {
+ n.Val = tostr(n.Val)
+ } else {
+ goto bad
+ }
+ }
+
+ n.Type = t
+ return
+
+bad:
+ if !(n.Diag != 0) {
+ if !(t.Broke != 0) {
+ Yyerror("cannot convert %v to type %v", Nconv(n, 0), Tconv(t, 0))
+ }
+ n.Diag = 1
+ }
+
+ if isideal(n.Type) != 0 {
+ defaultlit(&n, nil)
+ *np = n
+ }
+
+ return
+}
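+
+// Sketches of what convlit1 accepts and rejects (illustrative):
+//
+//	var i int = 'a'   // implicit (explicit=0): rune constant to int, i == 97
+//	var s string = 65 // implicit: rejected, reaching the "cannot convert" error
+//	_ = string(65)    // explicit=1: CTINT -> string is permitted, yielding "A"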
+
+func copyval(v Val) Val {
+ var i *Mpint
+ var f *Mpflt
+ var c *Mpcplx
+
+ switch v.Ctype {
+ case CTINT,
+ CTRUNE:
+ i = new(Mpint)
+ mpmovefixfix(i, v.U.Xval)
+ v.U.Xval = i
+
+ case CTFLT:
+ f = new(Mpflt)
+ mpmovefltflt(f, v.U.Fval)
+ v.U.Fval = f
+
+ case CTCPLX:
+ c = new(Mpcplx)
+ mpmovefltflt(&c.Real, &v.U.Cval.Real)
+ mpmovefltflt(&c.Imag, &v.U.Cval.Imag)
+ v.U.Cval = c
+ }
+
+ return v
+}
+
+func tocplx(v Val) Val {
+ var c *Mpcplx
+
+ switch v.Ctype {
+ case CTINT,
+ CTRUNE:
+ c = new(Mpcplx)
+ Mpmovefixflt(&c.Real, v.U.Xval)
+ Mpmovecflt(&c.Imag, 0.0)
+ v.Ctype = CTCPLX
+ v.U.Cval = c
+
+ case CTFLT:
+ c = new(Mpcplx)
+ mpmovefltflt(&c.Real, v.U.Fval)
+ Mpmovecflt(&c.Imag, 0.0)
+ v.Ctype = CTCPLX
+ v.U.Cval = c
+ }
+
+ return v
+}
+
+func toflt(v Val) Val {
+ var f *Mpflt
+
+ switch v.Ctype {
+ case CTINT,
+ CTRUNE:
+ f = new(Mpflt)
+ Mpmovefixflt(f, v.U.Xval)
+ v.Ctype = CTFLT
+ v.U.Fval = f
+
+ case CTCPLX:
+ f = new(Mpflt)
+ mpmovefltflt(f, &v.U.Cval.Real)
+ if mpcmpfltc(&v.U.Cval.Imag, 0) != 0 {
+ Yyerror("constant %v%vi truncated to real", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp|obj.FmtSign))
+ }
+ v.Ctype = CTFLT
+ v.U.Fval = f
+ }
+
+ return v
+}
+
+func toint(v Val) Val {
+ var i *Mpint
+
+ switch v.Ctype {
+ case CTRUNE:
+ v.Ctype = CTINT
+
+ case CTFLT:
+ i = new(Mpint)
+ if mpmovefltfix(i, v.U.Fval) < 0 {
+ Yyerror("constant %v truncated to integer", Fconv(v.U.Fval, obj.FmtSharp))
+ }
+ v.Ctype = CTINT
+ v.U.Xval = i
+
+ case CTCPLX:
+ i = new(Mpint)
+ if mpmovefltfix(i, &v.U.Cval.Real) < 0 {
+ Yyerror("constant %v%vi truncated to integer", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp|obj.FmtSign))
+ }
+ if mpcmpfltc(&v.U.Cval.Imag, 0) != 0 {
+ Yyerror("constant %v%vi truncated to real", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp|obj.FmtSign))
+ }
+ v.Ctype = CTINT
+ v.U.Xval = i
+ }
+
+ return v
+}
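+
+// For example (illustrative): toint on the float constant 1.0 yields
+// the integer 1; on 1.5 it reports "constant 1.5 truncated to integer";
+// and on the complex constant 2+0i it yields 2 silently, since only a
+// nonzero imaginary part draws the "truncated to real" complaint.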
+
+func doesoverflow(v Val, t *Type) int {
+ switch v.Ctype {
+ case CTINT,
+ CTRUNE:
+ if !(Isint[t.Etype] != 0) {
+ Fatal("overflow: %v integer constant", Tconv(t, 0))
+ }
+ if Mpcmpfixfix(v.U.Xval, Minintval[t.Etype]) < 0 || Mpcmpfixfix(v.U.Xval, Maxintval[t.Etype]) > 0 {
+ return 1
+ }
+
+ case CTFLT:
+ if !(Isfloat[t.Etype] != 0) {
+ Fatal("overflow: %v floating-point constant", Tconv(t, 0))
+ }
+ if mpcmpfltflt(v.U.Fval, minfltval[t.Etype]) <= 0 || mpcmpfltflt(v.U.Fval, maxfltval[t.Etype]) >= 0 {
+ return 1
+ }
+
+ case CTCPLX:
+ if !(Iscomplex[t.Etype] != 0) {
+ Fatal("overflow: %v complex constant", Tconv(t, 0))
+ }
+ if mpcmpfltflt(&v.U.Cval.Real, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.Cval.Real, maxfltval[t.Etype]) >= 0 || mpcmpfltflt(&v.U.Cval.Imag, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.Cval.Imag, maxfltval[t.Etype]) >= 0 {
+ return 1
+ }
+ }
+
+ return 0
+}
+
+func overflow(v Val, t *Type) {
+ // v has already been converted
+ // to appropriate form for t.
+ if t == nil || t.Etype == TIDEAL {
+ return
+ }
+
+ if !(doesoverflow(v, t) != 0) {
+ return
+ }
+
+ switch v.Ctype {
+ case CTINT,
+ CTRUNE:
+ Yyerror("constant %v overflows %v", Bconv(v.U.Xval, 0), Tconv(t, 0))
+
+ case CTFLT:
+ Yyerror("constant %v overflows %v", Fconv(v.U.Fval, obj.FmtSharp), Tconv(t, 0))
+
+ case CTCPLX:
+ Yyerror("constant %v overflows %v", Fconv(v.U.Fval, obj.FmtSharp), Tconv(t, 0))
+ }
+}
+
+func tostr(v Val) Val {
+ var rune_ uint
+ var s *Strlit
+
+ switch v.Ctype {
+ case CTINT,
+ CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, Minintval[TINT]) < 0 || Mpcmpfixfix(v.U.Xval, Maxintval[TINT]) > 0 {
+ Yyerror("overflow in int -> string")
+ }
+ rune_ = uint(Mpgetfix(v.U.Xval))
+ s = &Strlit{S: string(rune_)}
+ v = Val{}
+ v.Ctype = CTSTR
+ v.U.Sval = s
+
+ case CTFLT:
+ Yyerror("no float -> string")
+ fallthrough
+
+ case CTNIL:
+ v = Val{}
+ v.Ctype = CTSTR
+ v.U.Sval = new(Strlit)
+ }
+
+ return v
+}
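+
+// For example (illustrative): tostr on the integer constant 65 yields
+// "A" and on 0x4E2D yields "中", following Go's integer-to-string rune
+// conversion; a constant outside the int range reports
+// "overflow in int -> string".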
+
+func consttype(n *Node) int {
+ if n == nil || n.Op != OLITERAL {
+ return -1
+ }
+ return int(n.Val.Ctype)
+}
+
+func Isconst(n *Node, ct int) int {
+ var t int
+
+ t = consttype(n)
+
+ // If the caller is asking for CTINT, allow CTRUNE too.
+ // Makes life easier for back ends.
+ return bool2int(t == ct || (ct == CTINT && t == CTRUNE))
+}
+
+func saveorig(n *Node) *Node {
+ var n1 *Node
+
+ if n == n.Orig {
+ // duplicate node for n->orig.
+ n1 = Nod(OLITERAL, nil, nil)
+
+ n.Orig = n1
+ *n1 = *n
+ }
+
+ return n.Orig
+}
+
+/*
+ * if n is constant, rewrite as OLITERAL node.
+ */
+func evconst(n *Node) {
+ var nl *Node
+ var nr *Node
+ var norig *Node
+ var str *Strlit
+ var wl int
+ var wr int
+ var lno int
+ var et int
+ var v Val
+ var rv Val
+ var b Mpint
+ var l1 *NodeList
+ var l2 *NodeList
+
+ // pick off just the opcodes that can be
+ // constant evaluated.
+ switch n.Op {
+ default:
+ return
+
+ case OADD,
+ OAND,
+ OANDAND,
+ OANDNOT,
+ OARRAYBYTESTR,
+ OCOM,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLSH,
+ OLT,
+ OMINUS,
+ OMOD,
+ OMUL,
+ ONE,
+ ONOT,
+ OOR,
+ OOROR,
+ OPLUS,
+ ORSH,
+ OSUB,
+ OXOR:
+ break
+
+ case OCONV:
+ if n.Type == nil {
+ return
+ }
+ if !(okforconst[n.Type.Etype] != 0) && n.Type.Etype != TNIL {
+ return
+ }
+
+ // merge adjacent constants in the argument list.
+ case OADDSTR:
+ for l1 = n.List; l1 != nil; l1 = l1.Next {
+ if Isconst(l1.N, CTSTR) != 0 && l1.Next != nil && Isconst(l1.Next.N, CTSTR) != 0 {
+ // merge from l1 up to but not including l2
+ str = new(Strlit)
+ l2 = l1
+ for l2 != nil && Isconst(l2.N, CTSTR) != 0 {
+ nr = l2.N
+ str.S += nr.Val.U.Sval.S
+ l2 = l2.Next
+ }
+
+ nl = Nod(OXXX, nil, nil)
+ *nl = *l1.N
+ nl.Orig = nl
+ nl.Val.Ctype = CTSTR
+ nl.Val.U.Sval = str
+ l1.N = nl
+ l1.Next = l2
+ }
+ }
+
+ // fix list end pointer.
+ for l2 = n.List; l2 != nil; l2 = l2.Next {
+ n.List.End = l2
+ }
+
+ // collapse single-constant list to single constant.
+ if count(n.List) == 1 && Isconst(n.List.N, CTSTR) != 0 {
+ n.Op = OLITERAL
+ n.Val = n.List.N.Val
+ }
+
+ return
+ }
+
+ nl = n.Left
+ if nl == nil || nl.Type == nil {
+ return
+ }
+ if consttype(nl) < 0 {
+ return
+ }
+ wl = int(nl.Type.Etype)
+ if Isint[wl] != 0 || Isfloat[wl] != 0 || Iscomplex[wl] != 0 {
+ wl = TIDEAL
+ }
+
+ nr = n.Right
+ if nr == nil {
+ goto unary
+ }
+ if nr.Type == nil {
+ return
+ }
+ if consttype(nr) < 0 {
+ return
+ }
+ wr = int(nr.Type.Etype)
+ if Isint[wr] != 0 || Isfloat[wr] != 0 || Iscomplex[wr] != 0 {
+ wr = TIDEAL
+ }
+
+ // check for compatible general types (numeric, string, etc)
+ if wl != wr {
+ goto illegal
+ }
+
+ // check for compatible types.
+ switch n.Op {
+ // ideal const mixes with anything but otherwise must match.
+ default:
+ if nl.Type.Etype != TIDEAL {
+ defaultlit(&nr, nl.Type)
+ n.Right = nr
+ }
+
+ if nr.Type.Etype != TIDEAL {
+ defaultlit(&nl, nr.Type)
+ n.Left = nl
+ }
+
+ if nl.Type.Etype != nr.Type.Etype {
+ goto illegal
+ }
+
+ // right must be unsigned.
+ // left can be ideal.
+ case OLSH,
+ ORSH:
+ defaultlit(&nr, Types[TUINT])
+
+ n.Right = nr
+ if nr.Type != nil && (Issigned[nr.Type.Etype] != 0 || !(Isint[nr.Type.Etype] != 0)) {
+ goto illegal
+ }
+ if nl.Val.Ctype != CTRUNE {
+ nl.Val = toint(nl.Val)
+ }
+ nr.Val = toint(nr.Val)
+ }
+
+ // copy numeric value to avoid modifying
+ // n->left, in case someone still refers to it (e.g. iota).
+ v = nl.Val
+
+ if wl == TIDEAL {
+ v = copyval(v)
+ }
+
+ rv = nr.Val
+
+ // convert to common ideal
+ if v.Ctype == CTCPLX || rv.Ctype == CTCPLX {
+ v = tocplx(v)
+ rv = tocplx(rv)
+ }
+
+ if v.Ctype == CTFLT || rv.Ctype == CTFLT {
+ v = toflt(v)
+ rv = toflt(rv)
+ }
+
+	// Rune and int turn into rune.
+ if v.Ctype == CTRUNE && rv.Ctype == CTINT {
+ rv.Ctype = CTRUNE
+ }
+ if v.Ctype == CTINT && rv.Ctype == CTRUNE {
+ if n.Op == OLSH || n.Op == ORSH {
+ rv.Ctype = CTINT
+ } else {
+ v.Ctype = CTRUNE
+ }
+ }
+
+ if v.Ctype != rv.Ctype {
+ // Use of undefined name as constant?
+ if (v.Ctype == 0 || rv.Ctype == 0) && nerrors > 0 {
+ return
+ }
+ Fatal("constant type mismatch %v(%d) %v(%d)", Tconv(nl.Type, 0), v.Ctype, Tconv(nr.Type, 0), rv.Ctype)
+ }
+
+ // run op
+ switch uint32(n.Op)<<16 | uint32(v.Ctype) {
+ default:
+ goto illegal
+
+ case OADD<<16 | CTINT,
+ OADD<<16 | CTRUNE:
+ mpaddfixfix(v.U.Xval, rv.U.Xval, 0)
+
+ case OSUB<<16 | CTINT,
+ OSUB<<16 | CTRUNE:
+ mpsubfixfix(v.U.Xval, rv.U.Xval)
+
+ case OMUL<<16 | CTINT,
+ OMUL<<16 | CTRUNE:
+ mpmulfixfix(v.U.Xval, rv.U.Xval)
+
+ case ODIV<<16 | CTINT,
+ ODIV<<16 | CTRUNE:
+ if mpcmpfixc(rv.U.Xval, 0) == 0 {
+ Yyerror("division by zero")
+ Mpmovecfix(v.U.Xval, 1)
+ break
+ }
+
+ mpdivfixfix(v.U.Xval, rv.U.Xval)
+
+ case OMOD<<16 | CTINT,
+ OMOD<<16 | CTRUNE:
+ if mpcmpfixc(rv.U.Xval, 0) == 0 {
+ Yyerror("division by zero")
+ Mpmovecfix(v.U.Xval, 1)
+ break
+ }
+
+ mpmodfixfix(v.U.Xval, rv.U.Xval)
+
+ case OLSH<<16 | CTINT,
+ OLSH<<16 | CTRUNE:
+ mplshfixfix(v.U.Xval, rv.U.Xval)
+
+ case ORSH<<16 | CTINT,
+ ORSH<<16 | CTRUNE:
+ mprshfixfix(v.U.Xval, rv.U.Xval)
+
+ case OOR<<16 | CTINT,
+ OOR<<16 | CTRUNE:
+ mporfixfix(v.U.Xval, rv.U.Xval)
+
+ case OAND<<16 | CTINT,
+ OAND<<16 | CTRUNE:
+ mpandfixfix(v.U.Xval, rv.U.Xval)
+
+ case OANDNOT<<16 | CTINT,
+ OANDNOT<<16 | CTRUNE:
+ mpandnotfixfix(v.U.Xval, rv.U.Xval)
+
+ case OXOR<<16 | CTINT,
+ OXOR<<16 | CTRUNE:
+ mpxorfixfix(v.U.Xval, rv.U.Xval)
+
+ case OADD<<16 | CTFLT:
+ mpaddfltflt(v.U.Fval, rv.U.Fval)
+
+ case OSUB<<16 | CTFLT:
+ mpsubfltflt(v.U.Fval, rv.U.Fval)
+
+ case OMUL<<16 | CTFLT:
+ mpmulfltflt(v.U.Fval, rv.U.Fval)
+
+ case ODIV<<16 | CTFLT:
+ if mpcmpfltc(rv.U.Fval, 0) == 0 {
+ Yyerror("division by zero")
+ Mpmovecflt(v.U.Fval, 1.0)
+ break
+ }
+
+ mpdivfltflt(v.U.Fval, rv.U.Fval)
+
+ // The default case above would print 'ideal % ideal',
+ // which is not quite an ideal error.
+ case OMOD<<16 | CTFLT:
+ if !(n.Diag != 0) {
+ Yyerror("illegal constant expression: floating-point % operation")
+ n.Diag = 1
+ }
+
+ return
+
+ case OADD<<16 | CTCPLX:
+ mpaddfltflt(&v.U.Cval.Real, &rv.U.Cval.Real)
+ mpaddfltflt(&v.U.Cval.Imag, &rv.U.Cval.Imag)
+
+ case OSUB<<16 | CTCPLX:
+ mpsubfltflt(&v.U.Cval.Real, &rv.U.Cval.Real)
+ mpsubfltflt(&v.U.Cval.Imag, &rv.U.Cval.Imag)
+
+ case OMUL<<16 | CTCPLX:
+ cmplxmpy(v.U.Cval, rv.U.Cval)
+
+ case ODIV<<16 | CTCPLX:
+ if mpcmpfltc(&rv.U.Cval.Real, 0) == 0 && mpcmpfltc(&rv.U.Cval.Imag, 0) == 0 {
+ Yyerror("complex division by zero")
+ Mpmovecflt(&rv.U.Cval.Real, 1.0)
+ Mpmovecflt(&rv.U.Cval.Imag, 0.0)
+ break
+ }
+
+ cmplxdiv(v.U.Cval, rv.U.Cval)
+
+ case OEQ<<16 | CTNIL:
+ goto settrue
+
+ case ONE<<16 | CTNIL:
+ goto setfalse
+
+ case OEQ<<16 | CTINT,
+ OEQ<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) == 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case ONE<<16 | CTINT,
+ ONE<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLT<<16 | CTINT,
+ OLT<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) < 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLE<<16 | CTINT,
+ OLE<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) <= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGE<<16 | CTINT,
+ OGE<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) >= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGT<<16 | CTINT,
+ OGT<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) > 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OEQ<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) == 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case ONE<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLT<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) < 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLE<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) <= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGE<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) >= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGT<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) > 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OEQ<<16 | CTCPLX:
+ if mpcmpfltflt(&v.U.Cval.Real, &rv.U.Cval.Real) == 0 && mpcmpfltflt(&v.U.Cval.Imag, &rv.U.Cval.Imag) == 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case ONE<<16 | CTCPLX:
+ if mpcmpfltflt(&v.U.Cval.Real, &rv.U.Cval.Real) != 0 || mpcmpfltflt(&v.U.Cval.Imag, &rv.U.Cval.Imag) != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OEQ<<16 | CTSTR:
+ if cmpslit(nl, nr) == 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case ONE<<16 | CTSTR:
+ if cmpslit(nl, nr) != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLT<<16 | CTSTR:
+ if cmpslit(nl, nr) < 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLE<<16 | CTSTR:
+ if cmpslit(nl, nr) <= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGE<<16 | CTSTR:
+ if cmpslit(nl, nr) >= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGT<<16 | CTSTR:
+ if cmpslit(nl, nr) > 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OOROR<<16 | CTBOOL:
+ if v.U.Bval != 0 || rv.U.Bval != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OANDAND<<16 | CTBOOL:
+ if v.U.Bval != 0 && rv.U.Bval != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OEQ<<16 | CTBOOL:
+ if v.U.Bval == rv.U.Bval {
+ goto settrue
+ }
+ goto setfalse
+
+ case ONE<<16 | CTBOOL:
+ if v.U.Bval != rv.U.Bval {
+ goto settrue
+ }
+ goto setfalse
+ }
+
+ goto ret
+
+ // copy numeric value to avoid modifying
+ // nl, in case someone still refers to it (e.g. iota).
+unary:
+ v = nl.Val
+
+ if wl == TIDEAL {
+ v = copyval(v)
+ }
+
+ switch uint32(n.Op)<<16 | uint32(v.Ctype) {
+ default:
+ if !(n.Diag != 0) {
+ Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), Tconv(nl.Type, 0))
+ n.Diag = 1
+ }
+
+ return
+
+ case OCONV<<16 | CTNIL,
+ OARRAYBYTESTR<<16 | CTNIL:
+ if n.Type.Etype == TSTRING {
+ v = tostr(v)
+ nl.Type = n.Type
+ break
+ }
+ fallthrough
+ case OCONV<<16 | CTINT,
+ OCONV<<16 | CTRUNE,
+ OCONV<<16 | CTFLT,
+ OCONV<<16 | CTSTR:
+ convlit1(&nl, n.Type, 1)
+
+ v = nl.Val
+
+ case OPLUS<<16 | CTINT,
+ OPLUS<<16 | CTRUNE:
+ break
+
+ case OMINUS<<16 | CTINT,
+ OMINUS<<16 | CTRUNE:
+ mpnegfix(v.U.Xval)
+
+ case OCOM<<16 | CTINT,
+ OCOM<<16 | CTRUNE:
+ et = Txxx
+ if nl.Type != nil {
+ et = int(nl.Type.Etype)
+ }
+
+ // calculate the mask in b
+ // result will be (a ^ mask)
+ switch et {
+ // signed guys change sign
+ default:
+ Mpmovecfix(&b, -1)
+
+ // unsigned guys invert their bits
+ case TUINT8,
+ TUINT16,
+ TUINT32,
+ TUINT64,
+ TUINT,
+ TUINTPTR:
+ mpmovefixfix(&b, Maxintval[et])
+ }
+
+ mpxorfixfix(v.U.Xval, &b)
+
+ case OPLUS<<16 | CTFLT:
+ break
+
+ case OMINUS<<16 | CTFLT:
+ mpnegflt(v.U.Fval)
+
+ case OPLUS<<16 | CTCPLX:
+ break
+
+ case OMINUS<<16 | CTCPLX:
+ mpnegflt(&v.U.Cval.Real)
+ mpnegflt(&v.U.Cval.Imag)
+
+ case ONOT<<16 | CTBOOL:
+ if !(v.U.Bval != 0) {
+ goto settrue
+ }
+ goto setfalse
+ }
+
+ret:
+ norig = saveorig(n)
+ *n = *nl
+
+ // restore value of n->orig.
+ n.Orig = norig
+
+ n.Val = v
+
+ // check range.
+ lno = int(setlineno(n))
+
+ overflow(v, n.Type)
+ lineno = int32(lno)
+
+ // truncate precision for non-ideal float.
+ if v.Ctype == CTFLT && n.Type.Etype != TIDEAL {
+ n.Val.U.Fval = truncfltlit(v.U.Fval, n.Type)
+ }
+ return
+
+settrue:
+ norig = saveorig(n)
+ *n = *Nodbool(1)
+ n.Orig = norig
+ return
+
+setfalse:
+ norig = saveorig(n)
+ *n = *Nodbool(0)
+ n.Orig = norig
+ return
+
+illegal:
+ if !(n.Diag != 0) {
+ Yyerror("illegal constant expression: %v %v %v", Tconv(nl.Type, 0), Oconv(int(n.Op), 0), Tconv(nr.Type, 0))
+ n.Diag = 1
+ }
+
+ return
+}
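+
+// A worked example of the folding above (illustrative): in
+//
+//	const c = 3 + 4*5
+//
+// the typechecker folds bottom-up, so evconst first rewrites 4*5 into
+// the literal 20 and then 3+20 into 23. Mixed ideal kinds are unified
+// first: in 1.0 == 1 the integer operand is converted to CTFLT before
+// the comparison, which then lands in the settrue case.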
+
+func nodlit(v Val) *Node {
+ var n *Node
+
+ n = Nod(OLITERAL, nil, nil)
+ n.Val = v
+ switch v.Ctype {
+ default:
+ Fatal("nodlit ctype %d", v.Ctype)
+ fallthrough
+
+ case CTSTR:
+ n.Type = idealstring
+
+ case CTBOOL:
+ n.Type = idealbool
+
+ case CTINT,
+ CTRUNE,
+ CTFLT,
+ CTCPLX:
+ n.Type = Types[TIDEAL]
+
+ case CTNIL:
+ n.Type = Types[TNIL]
+ }
+
+ return n
+}
+
+func nodcplxlit(r Val, i Val) *Node {
+ var n *Node
+ var c *Mpcplx
+
+ r = toflt(r)
+ i = toflt(i)
+
+ c = new(Mpcplx)
+ n = Nod(OLITERAL, nil, nil)
+ n.Type = Types[TIDEAL]
+ n.Val.U.Cval = c
+ n.Val.Ctype = CTCPLX
+
+ if r.Ctype != CTFLT || i.Ctype != CTFLT {
+ Fatal("nodcplxlit ctype %d/%d", r.Ctype, i.Ctype)
+ }
+
+ mpmovefltflt(&c.Real, r.U.Fval)
+ mpmovefltflt(&c.Imag, i.U.Fval)
+ return n
+}
+
+// idealkind returns a constant kind like consttype
+// but for an arbitrary "ideal" (untyped constant) expression.
+func idealkind(n *Node) int {
+ var k1 int
+ var k2 int
+
+ if n == nil || !(isideal(n.Type) != 0) {
+ return CTxxx
+ }
+
+ switch n.Op {
+ default:
+ return CTxxx
+
+ case OLITERAL:
+ return int(n.Val.Ctype)
+
+ // numeric kinds.
+ case OADD,
+ OAND,
+ OANDNOT,
+ OCOM,
+ ODIV,
+ OMINUS,
+ OMOD,
+ OMUL,
+ OSUB,
+ OXOR,
+ OOR,
+ OPLUS:
+ k1 = idealkind(n.Left)
+
+ k2 = idealkind(n.Right)
+ if k1 > k2 {
+ return k1
+ } else {
+ return k2
+ }
+
+ case OREAL,
+ OIMAG:
+ return CTFLT
+
+ case OCOMPLEX:
+ return CTCPLX
+
+ case OADDSTR:
+ return CTSTR
+
+ case OANDAND,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLT,
+ ONE,
+ ONOT,
+ OOROR,
+ OCMPSTR,
+ OCMPIFACE:
+ return CTBOOL
+
+ // shifts (beware!).
+ case OLSH,
+ ORSH:
+ return idealkind(n.Left)
+ }
+}
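+
+// For example (illustrative): for the untyped expression 'x' + 1.0 the
+// operand kinds are CTRUNE and CTFLT; idealkind returns the larger,
+// CTFLT, so defaultlit will give the expression type float64 absent a
+// more specific context.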
+
+func defaultlit(np **Node, t *Type) {
+ var lno int
+ var ctype int
+ var n *Node
+ var nn *Node
+ var t1 *Type
+
+ n = *np
+ if n == nil || !(isideal(n.Type) != 0) {
+ return
+ }
+
+ if n.Op == OLITERAL {
+ nn = Nod(OXXX, nil, nil)
+ *nn = *n
+ n = nn
+ *np = n
+ }
+
+ lno = int(setlineno(n))
+ ctype = idealkind(n)
+ switch ctype {
+ default:
+ if t != nil {
+ Convlit(np, t)
+ return
+ }
+
+ if n.Val.Ctype == CTNIL {
+ lineno = int32(lno)
+ if !(n.Diag != 0) {
+ Yyerror("use of untyped nil")
+ n.Diag = 1
+ }
+
+ n.Type = nil
+ break
+ }
+
+ if n.Val.Ctype == CTSTR {
+ t1 = Types[TSTRING]
+ Convlit(np, t1)
+ break
+ }
+
+ Yyerror("defaultlit: unknown literal: %v", Nconv(n, 0))
+
+ case CTxxx:
+ Fatal("defaultlit: idealkind is CTxxx: %v", Nconv(n, obj.FmtSign))
+
+ case CTBOOL:
+ t1 = Types[TBOOL]
+ if t != nil && t.Etype == TBOOL {
+ t1 = t
+ }
+ Convlit(np, t1)
+
+ case CTINT:
+ t1 = Types[TINT]
+ goto num
+
+ case CTRUNE:
+ t1 = runetype
+ goto num
+
+ case CTFLT:
+ t1 = Types[TFLOAT64]
+ goto num
+
+ case CTCPLX:
+ t1 = Types[TCOMPLEX128]
+ goto num
+ }
+
+ lineno = int32(lno)
+ return
+
+num:
+ if t != nil {
+ if Isint[t.Etype] != 0 {
+ t1 = t
+ n.Val = toint(n.Val)
+ } else if Isfloat[t.Etype] != 0 {
+ t1 = t
+ n.Val = toflt(n.Val)
+ } else if Iscomplex[t.Etype] != 0 {
+ t1 = t
+ n.Val = tocplx(n.Val)
+ }
+ }
+
+ overflow(n.Val, t1)
+ Convlit(np, t1)
+ lineno = int32(lno)
+ return
+}
+
+/*
+ * defaultlit on both nodes simultaneously;
+ * if they're both ideal going in they better
+ * get the same type going out.
+ * force means must assign concrete (non-ideal) type.
+ */
+func defaultlit2(lp **Node, rp **Node, force int) {
+ var l *Node
+ var r *Node
+ var lkind int
+ var rkind int
+
+ l = *lp
+ r = *rp
+ if l.Type == nil || r.Type == nil {
+ return
+ }
+ if !(isideal(l.Type) != 0) {
+ Convlit(rp, l.Type)
+ return
+ }
+
+ if !(isideal(r.Type) != 0) {
+ Convlit(lp, r.Type)
+ return
+ }
+
+ if !(force != 0) {
+ return
+ }
+ if l.Type.Etype == TBOOL {
+ Convlit(lp, Types[TBOOL])
+ Convlit(rp, Types[TBOOL])
+ }
+
+ lkind = idealkind(l)
+ rkind = idealkind(r)
+ if lkind == CTCPLX || rkind == CTCPLX {
+ Convlit(lp, Types[TCOMPLEX128])
+ Convlit(rp, Types[TCOMPLEX128])
+ return
+ }
+
+ if lkind == CTFLT || rkind == CTFLT {
+ Convlit(lp, Types[TFLOAT64])
+ Convlit(rp, Types[TFLOAT64])
+ return
+ }
+
+ if lkind == CTRUNE || rkind == CTRUNE {
+ Convlit(lp, runetype)
+ Convlit(rp, runetype)
+ return
+ }
+
+ Convlit(lp, Types[TINT])
+ Convlit(rp, Types[TINT])
+}
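+
+// The resulting priority (illustrative sketch): complex128 beats
+// float64 beats rune beats int. So an untyped rune plus an untyped
+// float makes both sides float64, an untyped rune plus an untyped int
+// makes both sides rune, and two untyped ints default to int.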
+
+func cmpslit(l, r *Node) int {
+ return stringsCompare(l.Val.U.Sval.S, r.Val.U.Sval.S)
+}
+
+func Smallintconst(n *Node) int {
+ if n.Op == OLITERAL && Isconst(n, CTINT) != 0 && n.Type != nil {
+ switch Simtype[n.Type.Etype] {
+ case TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TBOOL,
+ TPTR32:
+ return 1
+
+ case TIDEAL,
+ TINT64,
+ TUINT64,
+ TPTR64:
+ if Mpcmpfixfix(n.Val.U.Xval, Minintval[TINT32]) < 0 || Mpcmpfixfix(n.Val.U.Xval, Maxintval[TINT32]) > 0 {
+ break
+ }
+ return 1
+ }
+ }
+
+ return 0
+}
+
+func nonnegconst(n *Node) int {
+ if n.Op == OLITERAL && n.Type != nil {
+ switch Simtype[n.Type.Etype] {
+ // check negative and 2^31
+ case TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TIDEAL:
+ if Mpcmpfixfix(n.Val.U.Xval, Minintval[TUINT32]) < 0 || Mpcmpfixfix(n.Val.U.Xval, Maxintval[TINT32]) > 0 {
+ break
+ }
+ return int(Mpgetfix(n.Val.U.Xval))
+ }
+ }
+
+ return -1
+}
+
+/*
+ * convert x to type et and back to int64
+ * for sign extension and truncation.
+ */
+func iconv(x int64, et int) int64 {
+ switch et {
+ case TINT8:
+ x = int64(int8(x))
+
+ case TUINT8:
+ x = int64(uint8(x))
+
+ case TINT16:
+ x = int64(int16(x))
+
+ case TUINT16:
+		x = int64(uint16(x))
+
+ case TINT32:
+ x = int64(int32(x))
+
+ case TUINT32:
+ x = int64(uint32(x))
+
+ case TINT64,
+ TUINT64:
+ break
+ }
+
+ return x
+}
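+
+// For example (illustrative): iconv(0x1ff, TINT8) keeps only the low
+// byte 0xff and sign-extends it to -1, while iconv(0x1ff, TUINT8)
+// truncates it to 255.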
+
+/*
+ * convert constant val to type t; leave in con.
+ * for back end.
+ */
+func Convconst(con *Node, t *Type, val *Val) {
+ var i int64
+ var tt int
+
+ tt = Simsimtype(t)
+
+ // copy the constant for conversion
+ Nodconst(con, Types[TINT8], 0)
+
+ con.Type = t
+ con.Val = *val
+
+ if Isint[tt] != 0 {
+ con.Val.Ctype = CTINT
+ con.Val.U.Xval = new(Mpint)
+ switch val.Ctype {
+ default:
+ Fatal("convconst ctype=%d %v", val.Ctype, Tconv(t, obj.FmtLong))
+ fallthrough
+
+ case CTINT,
+ CTRUNE:
+ i = Mpgetfix(val.U.Xval)
+
+ case CTBOOL:
+ i = int64(val.U.Bval)
+
+ case CTNIL:
+ i = 0
+ }
+
+ i = iconv(i, tt)
+ Mpmovecfix(con.Val.U.Xval, i)
+ return
+ }
+
+ if Isfloat[tt] != 0 {
+ con.Val = toflt(con.Val)
+ if con.Val.Ctype != CTFLT {
+ Fatal("convconst ctype=%d %v", con.Val.Ctype, Tconv(t, 0))
+ }
+ if tt == TFLOAT32 {
+ con.Val.U.Fval = truncfltlit(con.Val.U.Fval, t)
+ }
+ return
+ }
+
+ if Iscomplex[tt] != 0 {
+ con.Val = tocplx(con.Val)
+ if tt == TCOMPLEX64 {
+ con.Val.U.Cval.Real = *truncfltlit(&con.Val.U.Cval.Real, Types[TFLOAT32])
+ con.Val.U.Cval.Imag = *truncfltlit(&con.Val.U.Cval.Imag, Types[TFLOAT32])
+ }
+
+ return
+ }
+
+ Fatal("convconst %v constant", Tconv(t, obj.FmtLong))
+}
+
+// complex multiply v *= rv
+// (a, b) * (c, d) = (a*c - b*d, b*c + a*d)
+func cmplxmpy(v *Mpcplx, rv *Mpcplx) {
+ var ac Mpflt
+ var bd Mpflt
+ var bc Mpflt
+ var ad Mpflt
+
+	mpmovefltflt(&ac, &v.Real)
+	mpmulfltflt(&ac, &rv.Real) // ac
+
+	mpmovefltflt(&bd, &v.Imag)
+	mpmulfltflt(&bd, &rv.Imag) // bd
+
+	mpmovefltflt(&bc, &v.Imag)
+	mpmulfltflt(&bc, &rv.Real) // bc
+
+	mpmovefltflt(&ad, &v.Real)
+	mpmulfltflt(&ad, &rv.Imag) // ad
+
+	mpmovefltflt(&v.Real, &ac)
+	mpsubfltflt(&v.Real, &bd) // ac-bd
+
+	mpmovefltflt(&v.Imag, &bc)
+	mpaddfltflt(&v.Imag, &ad) // bc+ad
+}
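+
+// Worked example (illustrative): (1+2i) * (3+4i) computes ac=3, bd=8,
+// bc=6, ad=4, so real = 3-8 = -5 and imag = 6+4 = 10, i.e. -5+10i.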
+
+// complex divide v /= rv
+// (a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d)
+func cmplxdiv(v *Mpcplx, rv *Mpcplx) {
+ var ac Mpflt
+ var bd Mpflt
+ var bc Mpflt
+ var ad Mpflt
+ var cc_plus_dd Mpflt
+
+	mpmovefltflt(&cc_plus_dd, &rv.Real)
+	mpmulfltflt(&cc_plus_dd, &rv.Real) // cc
+
+	mpmovefltflt(&ac, &rv.Imag)
+	mpmulfltflt(&ac, &rv.Imag) // dd (ac reused as a temporary)
+
+	mpaddfltflt(&cc_plus_dd, &ac) // cc+dd
+
+	mpmovefltflt(&ac, &v.Real)
+	mpmulfltflt(&ac, &rv.Real) // ac
+
+	mpmovefltflt(&bd, &v.Imag)
+	mpmulfltflt(&bd, &rv.Imag) // bd
+
+	mpmovefltflt(&bc, &v.Imag)
+	mpmulfltflt(&bc, &rv.Real) // bc
+
+	mpmovefltflt(&ad, &v.Real)
+	mpmulfltflt(&ad, &rv.Imag) // ad
+
+	mpmovefltflt(&v.Real, &ac)
+	mpaddfltflt(&v.Real, &bd)         // ac+bd
+	mpdivfltflt(&v.Real, &cc_plus_dd) // (ac+bd)/(cc+dd)
+
+	mpmovefltflt(&v.Imag, &bc)
+	mpsubfltflt(&v.Imag, &ad)         // bc-ad
+	mpdivfltflt(&v.Imag, &cc_plus_dd) // (bc-ad)/(cc+dd)
+}
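+
+// Worked example (illustrative): (1+2i) / (3+4i) computes cc+dd = 25,
+// ac+bd = 3+8 = 11, bc-ad = 6-4 = 2, giving 11/25 + (2/25)i.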
+
+// Is n a Go language constant (as opposed to a compile-time constant)?
+// Expressions derived from nil, like string([]byte(nil)), while they
+// may be known at compile time, are not Go language constants.
+// Only called for expressions known to evaluate to compile-time
+// constants.
+func isgoconst(n *Node) int {
+ var l *Node
+ var t *Type
+
+ if n.Orig != nil {
+ n = n.Orig
+ }
+
+ switch n.Op {
+ case OADD,
+ OADDSTR,
+ OAND,
+ OANDAND,
+ OANDNOT,
+ OCOM,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLSH,
+ OLT,
+ OMINUS,
+ OMOD,
+ OMUL,
+ ONE,
+ ONOT,
+ OOR,
+ OOROR,
+ OPLUS,
+ ORSH,
+ OSUB,
+ OXOR,
+ OIOTA,
+ OCOMPLEX,
+ OREAL,
+ OIMAG:
+ if isgoconst(n.Left) != 0 && (n.Right == nil || isgoconst(n.Right) != 0) {
+ return 1
+ }
+
+ case OCONV:
+ if okforconst[n.Type.Etype] != 0 && isgoconst(n.Left) != 0 {
+ return 1
+ }
+
+ case OLEN,
+ OCAP:
+ l = n.Left
+ if isgoconst(l) != 0 {
+ return 1
+ }
+
+ // Special case: len/cap is constant when applied to array or
+ // pointer to array when the expression does not contain
+ // function calls or channel receive operations.
+ t = l.Type
+
+ if t != nil && Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+ if Isfixedarray(t) != 0 && !(hascallchan(l) != 0) {
+ return 1
+ }
+
+ case OLITERAL:
+ if n.Val.Ctype != CTNIL {
+ return 1
+ }
+
+ case ONAME:
+ l = n.Sym.Def
+ if l != nil && l.Op == OLITERAL && n.Val.Ctype != CTNIL {
+ return 1
+ }
+
+ case ONONAME:
+ if n.Sym.Def != nil && n.Sym.Def.Op == OIOTA {
+ return 1
+ }
+
+	// The only constant calls are unsafe.Alignof, Offsetof, and Sizeof.
+ case OCALL:
+ l = n.Left
+
+ for l.Op == OPAREN {
+ l = l.Left
+ }
+ if l.Op != ONAME || l.Sym.Pkg != unsafepkg {
+ break
+ }
+ if l.Sym.Name == "Alignof" || l.Sym.Name == "Offsetof" || l.Sym.Name == "Sizeof" {
+ return 1
+ }
+ }
+
+ //dump("nonconst", n);
+ return 0
+}
+
+func hascallchan(n *Node) int {
+ var l *NodeList
+
+ if n == nil {
+ return 0
+ }
+ switch n.Op {
+ case OAPPEND,
+ OCALL,
+ OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH,
+ OCAP,
+ OCLOSE,
+ OCOMPLEX,
+ OCOPY,
+ ODELETE,
+ OIMAG,
+ OLEN,
+ OMAKE,
+ ONEW,
+ OPANIC,
+ OPRINT,
+ OPRINTN,
+ OREAL,
+ ORECOVER,
+ ORECV:
+ return 1
+ }
+
+ if hascallchan(n.Left) != 0 || hascallchan(n.Right) != 0 {
+ return 1
+ }
+
+ for l = n.List; l != nil; l = l.Next {
+ if hascallchan(l.N) != 0 {
+ return 1
+ }
+ }
+ for l = n.Rlist; l != nil; l = l.Next {
+ if hascallchan(l.N) != 0 {
+ return 1
+ }
+ }
+
+ return 0
+}
diff --git a/src/cmd/internal/gc/cplx.go b/src/cmd/internal/gc/cplx.go
new file mode 100644
index 0000000000..bc5b5478d7
--- /dev/null
+++ b/src/cmd/internal/gc/cplx.go
@@ -0,0 +1,503 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+func CASE(a int, b int) int {
+ return a<<16 | b
+}
+
+func overlap_cplx(f *Node, t *Node) int {
+ // check whether f and t could be overlapping stack references.
+ // not exact, because it's hard to check for the stack register
+ // in portable code. close enough: worst case we will allocate
+ // an extra temporary and the registerizer will clean it up.
+ return bool2int(f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset)
+}
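+
+// For example (illustrative): two complex64 slots (width 8) at offsets
+// 0 and 16 are reported disjoint, while slots at offsets 0 and 4 are
+// flagged; since the comparisons use >=, exactly adjacent slots at
+// offsets 0 and 8 are conservatively flagged as overlapping as well.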
+
+func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Prog) {
+ var tnl Node
+ var tnr Node
+ var n1 Node
+ var n2 Node
+ var n3 Node
+ var n4 Node
+ var na Node
+ var nb Node
+ var nc Node
+
+ // make both sides addable in ullman order
+ if nr != nil {
+ if nl.Ullman > nr.Ullman && !(nl.Addable != 0) {
+ Tempname(&tnl, nl.Type)
+ Thearch.Cgen(nl, &tnl)
+ nl = &tnl
+ }
+
+ if !(nr.Addable != 0) {
+ Tempname(&tnr, nr.Type)
+ Thearch.Cgen(nr, &tnr)
+ nr = &tnr
+ }
+ }
+
+ if !(nl.Addable != 0) {
+ Tempname(&tnl, nl.Type)
+ Thearch.Cgen(nl, &tnl)
+ nl = &tnl
+ }
+
+ // build tree
+ // real(l) == real(r) && imag(l) == imag(r)
+
+ subnode(&n1, &n2, nl)
+
+ subnode(&n3, &n4, nr)
+
+ na = Node{}
+ na.Op = OANDAND
+ na.Left = &nb
+ na.Right = &nc
+ na.Type = Types[TBOOL]
+
+ nb = Node{}
+ nb.Op = OEQ
+ nb.Left = &n1
+ nb.Right = &n3
+ nb.Type = Types[TBOOL]
+
+ nc = Node{}
+ nc.Op = OEQ
+ nc.Left = &n2
+ nc.Right = &n4
+ nc.Type = Types[TBOOL]
+
+ if op == ONE {
+ true_ = !true_
+ }
+
+ Thearch.Bgen(&na, true_, likely, to)
+}
+
+// break addable nc-complex into nr-real and ni-imaginary
+func subnode(nr *Node, ni *Node, nc *Node) {
+ var tc int
+ var t *Type
+
+ if !(nc.Addable != 0) {
+ Fatal("subnode not addable")
+ }
+
+ tc = Simsimtype(nc.Type)
+ tc = cplxsubtype(tc)
+ t = Types[tc]
+
+ if nc.Op == OLITERAL {
+ nodfconst(nr, t, &nc.Val.U.Cval.Real)
+ nodfconst(ni, t, &nc.Val.U.Cval.Imag)
+ return
+ }
+
+ *nr = *nc
+ nr.Type = t
+
+ *ni = *nc
+ ni.Type = t
+ ni.Xoffset += t.Width
+}
+
+// generate code res = -nl
+func minus(nl *Node, res *Node) {
+ var ra Node
+
+ ra = Node{}
+ ra.Op = OMINUS
+ ra.Left = nl
+ ra.Type = nl.Type
+ Thearch.Cgen(&ra, res)
+}
+
+// build and execute tree
+// real(res) = -real(nl)
+// imag(res) = -imag(nl)
+func complexminus(nl *Node, res *Node) {
+ var n1 Node
+ var n2 Node
+ var n5 Node
+ var n6 Node
+
+ subnode(&n1, &n2, nl)
+ subnode(&n5, &n6, res)
+
+ minus(&n1, &n5)
+ minus(&n2, &n6)
+}
+
+// build and execute tree
+// real(res) = real(nl) op real(nr)
+// imag(res) = imag(nl) op imag(nr)
+func complexadd(op int, nl *Node, nr *Node, res *Node) {
+ var n1 Node
+ var n2 Node
+ var n3 Node
+ var n4 Node
+ var n5 Node
+ var n6 Node
+ var ra Node
+
+ subnode(&n1, &n2, nl)
+ subnode(&n3, &n4, nr)
+ subnode(&n5, &n6, res)
+
+ ra = Node{}
+ ra.Op = uint8(op)
+ ra.Left = &n1
+ ra.Right = &n3
+ ra.Type = n1.Type
+ Thearch.Cgen(&ra, &n5)
+
+ ra = Node{}
+ ra.Op = uint8(op)
+ ra.Left = &n2
+ ra.Right = &n4
+ ra.Type = n2.Type
+ Thearch.Cgen(&ra, &n6)
+}
+
+// build and execute tree
+// tmp = real(nl)*real(nr) - imag(nl)*imag(nr)
+// imag(res) = real(nl)*imag(nr) + imag(nl)*real(nr)
+// real(res) = tmp
+func complexmul(nl *Node, nr *Node, res *Node) {
+ var n1 Node
+ var n2 Node
+ var n3 Node
+ var n4 Node
+ var n5 Node
+ var n6 Node
+ var rm1 Node
+ var rm2 Node
+ var ra Node
+ var tmp Node
+
+ subnode(&n1, &n2, nl)
+ subnode(&n3, &n4, nr)
+ subnode(&n5, &n6, res)
+ Tempname(&tmp, n5.Type)
+
+ // real part -> tmp
+ rm1 = Node{}
+
+ rm1.Op = OMUL
+ rm1.Left = &n1
+ rm1.Right = &n3
+ rm1.Type = n1.Type
+
+ rm2 = Node{}
+ rm2.Op = OMUL
+ rm2.Left = &n2
+ rm2.Right = &n4
+ rm2.Type = n2.Type
+
+ ra = Node{}
+ ra.Op = OSUB
+ ra.Left = &rm1
+ ra.Right = &rm2
+ ra.Type = rm1.Type
+ Thearch.Cgen(&ra, &tmp)
+
+ // imag part
+ rm1 = Node{}
+
+ rm1.Op = OMUL
+ rm1.Left = &n1
+ rm1.Right = &n4
+ rm1.Type = n1.Type
+
+ rm2 = Node{}
+ rm2.Op = OMUL
+ rm2.Left = &n2
+ rm2.Right = &n3
+ rm2.Type = n2.Type
+
+ ra = Node{}
+ ra.Op = OADD
+ ra.Left = &rm1
+ ra.Right = &rm2
+ ra.Type = rm1.Type
+ Thearch.Cgen(&ra, &n6)
+
+	// tmp -> real part
+ Thearch.Cgen(&tmp, &n5)
+}
+
+func nodfconst(n *Node, t *Type, fval *Mpflt) {
+ *n = Node{}
+ n.Op = OLITERAL
+ n.Addable = 1
+ ullmancalc(n)
+ n.Val.U.Fval = fval
+ n.Val.Ctype = CTFLT
+ n.Type = t
+
+ if !(Isfloat[t.Etype] != 0) {
+ Fatal("nodfconst: bad type %v", Tconv(t, 0))
+ }
+}
+
+/*
+ * cplx.c
+ */
+func Complexop(n *Node, res *Node) int {
+ if n != nil && n.Type != nil {
+ if Iscomplex[n.Type.Etype] != 0 {
+ goto maybe
+ }
+ }
+
+ if res != nil && res.Type != nil {
+ if Iscomplex[res.Type.Etype] != 0 {
+ goto maybe
+ }
+ }
+
+ if n.Op == OREAL || n.Op == OIMAG {
+ goto yes
+ }
+
+ goto no
+
+maybe:
+ switch n.Op {
+ case OCONV, // implemented ops
+ OADD,
+ OSUB,
+ OMUL,
+ OMINUS,
+ OCOMPLEX,
+ OREAL,
+ OIMAG:
+ goto yes
+
+ case ODOT,
+ ODOTPTR,
+ OINDEX,
+ OIND,
+ ONAME:
+ goto yes
+ }
+
+ //dump("\ncomplex-no", n);
+no:
+ return 0
+
+ //dump("\ncomplex-yes", n);
+yes:
+ return 1
+}
+
+func Complexmove(f *Node, t *Node) {
+ var ft int
+ var tt int
+ var n1 Node
+ var n2 Node
+ var n3 Node
+ var n4 Node
+ var tmp Node
+
+ if Debug['g'] != 0 {
+ Dump("\ncomplexmove-f", f)
+ Dump("complexmove-t", t)
+ }
+
+ if !(t.Addable != 0) {
+ Fatal("complexmove: to not addable")
+ }
+
+ ft = Simsimtype(f.Type)
+ tt = Simsimtype(t.Type)
+ switch uint32(ft)<<16 | uint32(tt) {
+ default:
+ Fatal("complexmove: unknown conversion: %v -> %v\n", Tconv(f.Type, 0), Tconv(t.Type, 0))
+ fallthrough
+
+ // complex to complex move/convert.
+ // make f addable.
+	// also use a temporary if there is possible stack overlap.
+ case TCOMPLEX64<<16 | TCOMPLEX64,
+ TCOMPLEX64<<16 | TCOMPLEX128,
+ TCOMPLEX128<<16 | TCOMPLEX64,
+ TCOMPLEX128<<16 | TCOMPLEX128:
+ if !(f.Addable != 0) || overlap_cplx(f, t) != 0 {
+ Tempname(&tmp, f.Type)
+ Complexmove(f, &tmp)
+ f = &tmp
+ }
+
+ subnode(&n1, &n2, f)
+ subnode(&n3, &n4, t)
+
+ Thearch.Cgen(&n1, &n3)
+ Thearch.Cgen(&n2, &n4)
+ }
+}
+
+func Complexgen(n *Node, res *Node) {
+ var nl *Node
+ var nr *Node
+ var tnl Node
+ var tnr Node
+ var n1 Node
+ var n2 Node
+ var tmp Node
+ var tl int
+ var tr int
+
+ if Debug['g'] != 0 {
+ Dump("\ncomplexgen-n", n)
+ Dump("complexgen-res", res)
+ }
+
+ for n.Op == OCONVNOP {
+ n = n.Left
+ }
+
+ // pick off float/complex opcodes
+ switch n.Op {
+ case OCOMPLEX:
+ if res.Addable != 0 {
+ subnode(&n1, &n2, res)
+ Tempname(&tmp, n1.Type)
+ Thearch.Cgen(n.Left, &tmp)
+ Thearch.Cgen(n.Right, &n2)
+ Thearch.Cgen(&tmp, &n1)
+ return
+ }
+
+ case OREAL,
+ OIMAG:
+ nl = n.Left
+ if !(nl.Addable != 0) {
+ Tempname(&tmp, nl.Type)
+ Complexgen(nl, &tmp)
+ nl = &tmp
+ }
+
+ subnode(&n1, &n2, nl)
+ if n.Op == OREAL {
+ Thearch.Cgen(&n1, res)
+ return
+ }
+
+ Thearch.Cgen(&n2, res)
+ return
+ }
+
+ // perform conversion from n to res
+ tl = Simsimtype(res.Type)
+
+ tl = cplxsubtype(tl)
+ tr = Simsimtype(n.Type)
+ tr = cplxsubtype(tr)
+ if tl != tr {
+ if !(n.Addable != 0) {
+ Tempname(&n1, n.Type)
+ Complexmove(n, &n1)
+ n = &n1
+ }
+
+ Complexmove(n, res)
+ return
+ }
+
+ if !(res.Addable != 0) {
+ Thearch.Igen(res, &n1, nil)
+ Thearch.Cgen(n, &n1)
+ Thearch.Regfree(&n1)
+ return
+ }
+
+ if n.Addable != 0 {
+ Complexmove(n, res)
+ return
+ }
+
+ switch n.Op {
+ default:
+ Dump("complexgen: unknown op", n)
+ Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))
+ fallthrough
+
+ case ODOT,
+ ODOTPTR,
+ OINDEX,
+ OIND,
+ ONAME, // PHEAP or PPARAMREF var
+ OCALLFUNC,
+ OCALLMETH,
+ OCALLINTER:
+ Thearch.Igen(n, &n1, res)
+
+ Complexmove(&n1, res)
+ Thearch.Regfree(&n1)
+ return
+
+ case OCONV,
+ OADD,
+ OSUB,
+ OMUL,
+ OMINUS,
+ OCOMPLEX,
+ OREAL,
+ OIMAG:
+ break
+ }
+
+ nl = n.Left
+ if nl == nil {
+ return
+ }
+ nr = n.Right
+
+ // make both sides addable in ullman order
+ if nr != nil {
+ if nl.Ullman > nr.Ullman && !(nl.Addable != 0) {
+ Tempname(&tnl, nl.Type)
+ Thearch.Cgen(nl, &tnl)
+ nl = &tnl
+ }
+
+ if !(nr.Addable != 0) {
+ Tempname(&tnr, nr.Type)
+ Thearch.Cgen(nr, &tnr)
+ nr = &tnr
+ }
+ }
+
+ if !(nl.Addable != 0) {
+ Tempname(&tnl, nl.Type)
+ Thearch.Cgen(nl, &tnl)
+ nl = &tnl
+ }
+
+ switch n.Op {
+ default:
+ Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))
+
+ case OCONV:
+ Complexmove(nl, res)
+
+ case OMINUS:
+ complexminus(nl, res)
+
+ case OADD,
+ OSUB:
+ complexadd(int(n.Op), nl, nr, res)
+
+ case OMUL:
+ complexmul(nl, nr, res)
+ }
+}
diff --git a/src/cmd/internal/gc/dcl.go b/src/cmd/internal/gc/dcl.go
new file mode 100644
index 0000000000..0aeb58742d
--- /dev/null
+++ b/src/cmd/internal/gc/dcl.go
@@ -0,0 +1,1565 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+)
+
+func dflag() int {
+ if !(Debug['d'] != 0) {
+ return 0
+ }
+ if Debug['y'] != 0 {
+ return 1
+ }
+ if incannedimport != 0 {
+ return 0
+ }
+ return 1
+}
+
+/*
+ * declaration stack & operations
+ */
+func dcopy(a *Sym, b *Sym) {
+ a.Pkg = b.Pkg
+ a.Name = b.Name
+ a.Def = b.Def
+ a.Block = b.Block
+ a.Lastlineno = b.Lastlineno
+}
+
+func push() *Sym {
+ var d *Sym
+
+ d = new(Sym)
+ d.Lastlineno = lineno
+ d.Link = dclstack
+ dclstack = d
+ return d
+}
+
+func pushdcl(s *Sym) *Sym {
+ var d *Sym
+
+ d = push()
+ dcopy(d, s)
+ if dflag() != 0 {
+ fmt.Printf("\t%v push %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
+ }
+ return d
+}
+
+func popdcl() {
+ var d *Sym
+ var s *Sym
+ var lno int
+
+ // if(dflag())
+ // print("revert\n");
+
+ for d = dclstack; d != nil; d = d.Link {
+ if d.Name == "" {
+ break
+ }
+ s = Pkglookup(d.Name, d.Pkg)
+ lno = int(s.Lastlineno)
+ dcopy(s, d)
+ d.Lastlineno = int32(lno)
+ if dflag() != 0 {
+ fmt.Printf("\t%v pop %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
+ }
+ }
+
+ if d == nil {
+ Fatal("popdcl: no mark")
+ }
+ dclstack = d.Link
+ block = d.Block
+}
+
+func poptodcl() {
+ // pop the old marker and push a new one
+ // (cannot reuse the existing one)
+ // because we use the markers to identify blocks
+ // for the goto restriction checks.
+ popdcl()
+
+ markdcl()
+}
+
+func markdcl() {
+ var d *Sym
+
+ d = push()
+ d.Name = "" // used as a mark in fifo
+ d.Block = block
+
+ blockgen++
+ block = blockgen
+
+	// if(dflag())
+	//	print("markdcl\n");
+}
+
+func dumpdcl(st string) {
+ var s *Sym
+ var d *Sym
+ var i int
+
+ i = 0
+ for d = dclstack; d != nil; d = d.Link {
+ i++
+ fmt.Printf(" %.2d %p", i, d)
+ if d.Name == "" {
+ fmt.Printf("\n")
+ continue
+ }
+
+ fmt.Printf(" '%s'", d.Name)
+ s = Pkglookup(d.Name, d.Pkg)
+ fmt.Printf(" %v\n", Sconv(s, 0))
+ }
+}
+
+func testdclstack() {
+ var d *Sym
+
+ for d = dclstack; d != nil; d = d.Link {
+ if d.Name == "" {
+ if nerrors != 0 {
+ errorexit()
+ }
+ Yyerror("mark left on the stack")
+ continue
+ }
+ }
+}
+
+func redeclare(s *Sym, where string) {
+ var pkgstr *Strlit
+ var line1 int
+ var line2 int
+
+ if s.Lastlineno == 0 {
+ var tmp *Strlit
+ if s.Origpkg != nil {
+ tmp = s.Origpkg.Path
+ } else {
+ tmp = s.Pkg.Path
+ }
+ pkgstr = tmp
+ Yyerror("%v redeclared %s\n"+"\tprevious declaration during import \"%v\"", Sconv(s, 0), where, Zconv(pkgstr, 0))
+ } else {
+ line1 = parserline()
+ line2 = int(s.Lastlineno)
+
+ // When an import and a declaration collide in separate files,
+ // present the import as the "redeclared", because the declaration
+ // is visible where the import is, but not vice versa.
+ // See issue 4510.
+ if s.Def == nil {
+ line2 = line1
+ line1 = int(s.Lastlineno)
+ }
+
+ yyerrorl(int(line1), "%v redeclared %s\n"+"\tprevious declaration at %v", Sconv(s, 0), where, Ctxt.Line(line2))
+ }
+}
+
+var vargen int
+
+/*
+ * declare individual names - var, typ, const
+ */
+
+var declare_typegen int
+
+func declare(n *Node, ctxt int) {
+ var s *Sym
+ var gen int
+
+ if ctxt == PDISCARD {
+ return
+ }
+
+ if isblank(n) {
+ return
+ }
+
+ n.Lineno = int32(parserline())
+ s = n.Sym
+
+	// kludgy: typecheckok means we're past parsing. E.g. genwrapper may declare out-of-package names later.
+ if importpkg == nil && !(typecheckok != 0) && s.Pkg != localpkg {
+ Yyerror("cannot declare name %v", Sconv(s, 0))
+ }
+
+ if ctxt == PEXTERN && s.Name == "init" {
+		Yyerror("cannot declare init - must be func")
+ }
+
+ gen = 0
+ if ctxt == PEXTERN {
+ externdcl = list(externdcl, n)
+ if dflag() != 0 {
+ fmt.Printf("\t%v global decl %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), n)
+ }
+ } else {
+ if Curfn == nil && ctxt == PAUTO {
+ Fatal("automatic outside function")
+ }
+ if Curfn != nil {
+ Curfn.Dcl = list(Curfn.Dcl, n)
+ }
+ if n.Op == OTYPE {
+ declare_typegen++
+ gen = declare_typegen
+ } else if n.Op == ONAME && ctxt == PAUTO && !strings.Contains(s.Name, "·") {
+ vargen++
+ gen = vargen
+ }
+ pushdcl(s)
+ n.Curfn = Curfn
+ }
+
+ if ctxt == PAUTO {
+ n.Xoffset = 0
+ }
+
+ if s.Block == block {
+ // functype will print errors about duplicate function arguments.
+ // Don't repeat the error here.
+ if ctxt != PPARAM && ctxt != PPARAMOUT {
+ redeclare(s, "in this block")
+ }
+ }
+
+ s.Block = block
+ s.Lastlineno = int32(parserline())
+ s.Def = n
+ n.Vargen = int32(gen)
+ n.Funcdepth = Funcdepth
+ n.Class = uint8(ctxt)
+
+ autoexport(n, ctxt)
+}
+
+func addvar(n *Node, t *Type, ctxt int) {
+ if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
+ Fatal("addvar: n=%v t=%v nil", Nconv(n, 0), Tconv(t, 0))
+ }
+
+ n.Op = ONAME
+ declare(n, ctxt)
+ n.Type = t
+}
+
+/*
+ * declare variables from grammar
+ * new_name_list (type | [type] = expr_list)
+ */
+func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
+ var doexpr int
+ var v *Node
+ var e *Node
+ var as2 *Node
+ var init *NodeList
+
+ init = nil
+ doexpr = bool2int(el != nil)
+
+ if count(el) == 1 && count(vl) > 1 {
+ e = el.N
+ as2 = Nod(OAS2, nil, nil)
+ as2.List = vl
+ as2.Rlist = list1(e)
+ for ; vl != nil; vl = vl.Next {
+ v = vl.N
+ v.Op = ONAME
+ declare(v, dclcontext)
+ v.Ntype = t
+ v.Defn = as2
+ if Funcdepth > 0 {
+ init = list(init, Nod(ODCL, v, nil))
+ }
+ }
+
+ return list(init, as2)
+ }
+
+ for ; vl != nil; vl = vl.Next {
+ if doexpr != 0 {
+ if el == nil {
+ Yyerror("missing expression in var declaration")
+ break
+ }
+
+ e = el.N
+ el = el.Next
+ } else {
+ e = nil
+ }
+
+ v = vl.N
+ v.Op = ONAME
+ declare(v, dclcontext)
+ v.Ntype = t
+
+ if e != nil || Funcdepth > 0 || isblank(v) {
+ if Funcdepth > 0 {
+ init = list(init, Nod(ODCL, v, nil))
+ }
+ e = Nod(OAS, v, e)
+ init = list(init, e)
+ if e.Right != nil {
+ v.Defn = e
+ }
+ }
+ }
+
+ if el != nil {
+ Yyerror("extra expression in var declaration")
+ }
+ return init
+}
+
+/*
+ * declare constants from grammar
+ * new_name_list [[type] = expr_list]
+ */
+func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
+ var v *Node
+ var c *Node
+ var vv *NodeList
+
+ vv = nil
+ if cl == nil {
+ if t != nil {
+ Yyerror("const declaration cannot have type without expression")
+ }
+ cl = lastconst
+ t = lasttype
+ } else {
+ lastconst = cl
+ lasttype = t
+ }
+
+ cl = listtreecopy(cl)
+
+ for ; vl != nil; vl = vl.Next {
+ if cl == nil {
+ Yyerror("missing value in const declaration")
+ break
+ }
+
+ c = cl.N
+ cl = cl.Next
+
+ v = vl.N
+ v.Op = OLITERAL
+ declare(v, dclcontext)
+
+ v.Ntype = t
+ v.Defn = c
+
+ vv = list(vv, Nod(ODCLCONST, v, nil))
+ }
+
+ if cl != nil {
+ Yyerror("extra expression in const declaration")
+ }
+ iota_ += 1
+ return vv
+}
+
+/*
+ * this generates a new name node,
+ * typically for labels or other one-off names.
+ */
+func newname(s *Sym) *Node {
+ var n *Node
+
+ if s == nil {
+ Fatal("newname nil")
+ }
+
+ n = Nod(ONAME, nil, nil)
+ n.Sym = s
+ n.Type = nil
+ n.Addable = 1
+ n.Ullman = 1
+ n.Xoffset = 0
+ return n
+}
+
+/*
+ * this generates a new name node for a name
+ * being declared.
+ */
+func dclname(s *Sym) *Node {
+ var n *Node
+
+ n = newname(s)
+ n.Op = ONONAME // caller will correct it
+ return n
+}
+
+func typenod(t *Type) *Node {
+ // if we copied another type with *t = *u
+ // then t->nod might be out of date, so
+ // check t->nod->type too
+ if t.Nod == nil || t.Nod.Type != t {
+ t.Nod = Nod(OTYPE, nil, nil)
+ t.Nod.Type = t
+ t.Nod.Sym = t.Sym
+ }
+
+ return t.Nod
+}
+
+/*
+ * this will return an old name
+ * that has already been pushed on the
+ * declaration list. a diagnostic is
+ * generated if no name has been defined.
+ */
+func oldname(s *Sym) *Node {
+ var n *Node
+ var c *Node
+
+ n = s.Def
+ if n == nil {
+ // maybe a top-level name will come along
+ // to give this a definition later.
+ // walkdef will check s->def again once
+ // all the input source has been processed.
+ n = newname(s)
+
+ n.Op = ONONAME
+ n.Iota = iota_ // save current iota value in const declarations
+ }
+
+ if Curfn != nil && n.Funcdepth > 0 && n.Funcdepth != Funcdepth && n.Op == ONAME {
+ // inner func is referring to var in outer func.
+ //
+ // TODO(rsc): If there is an outer variable x and we
+ // are parsing x := 5 inside the closure, until we get to
+ // the := it looks like a reference to the outer x so we'll
+ // make x a closure variable unnecessarily.
+ if n.Closure == nil || n.Closure.Funcdepth != Funcdepth {
+ // create new closure var.
+ c = Nod(ONAME, nil, nil)
+
+ c.Sym = s
+ c.Class = PPARAMREF
+ c.Isddd = n.Isddd
+ c.Defn = n
+ c.Addable = 0
+ c.Ullman = 2
+ c.Funcdepth = Funcdepth
+ c.Outer = n.Closure
+ n.Closure = c
+ c.Closure = n
+ c.Xoffset = 0
+ Curfn.Cvars = list(Curfn.Cvars, c)
+ }
+
+ // return ref to closure var, not original
+ return n.Closure
+ }
+
+ return n
+}
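+
+// For example (illustrative): in
+//
+//	func outer() func() int {
+//		x := 1
+//		return func() int { return x }
+//	}
+//
+// the reference to x inside the literal sees n.Funcdepth != Funcdepth,
+// so oldname returns a fresh PPARAMREF closure variable chained to the
+// outer x through n.Closure and recorded in Curfn.Cvars.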
+
+/*
+ * := declarations
+ */
+func colasname(n *Node) int {
+ switch n.Op {
+ case ONAME,
+ ONONAME,
+ OPACK,
+ OTYPE,
+ OLITERAL:
+ return bool2int(n.Sym != nil)
+ }
+
+ return 0
+}
+
+func colasdefn(left *NodeList, defn *Node) {
+ var nnew int
+ var nerr int
+ var l *NodeList
+ var n *Node
+
+ for l = left; l != nil; l = l.Next {
+ if l.N.Sym != nil {
+ l.N.Sym.Flags |= SymUniq
+ }
+ }
+
+ nnew = 0
+ nerr = 0
+ for l = left; l != nil; l = l.Next {
+ n = l.N
+ if isblank(n) {
+ continue
+ }
+ if !(colasname(n) != 0) {
+ yyerrorl(int(defn.Lineno), "non-name %v on left side of :=", Nconv(n, 0))
+ nerr++
+ continue
+ }
+
+ if n.Sym.Flags&SymUniq == 0 {
+ yyerrorl(int(defn.Lineno), "%v repeated on left side of :=", Sconv(n.Sym, 0))
+ n.Diag++
+ nerr++
+ continue
+ }
+
+ n.Sym.Flags &^= SymUniq
+ if n.Sym.Block == block {
+ continue
+ }
+
+ nnew++
+ n = newname(n.Sym)
+ declare(n, dclcontext)
+ n.Defn = defn
+ defn.Ninit = list(defn.Ninit, Nod(ODCL, n, nil))
+ l.N = n
+ }
+
+ if nnew == 0 && nerr == 0 {
+ yyerrorl(int(defn.Lineno), "no new variables on left side of :=")
+ }
+}
+
+func colas(left *NodeList, right *NodeList, lno int32) *Node {
+ var as *Node
+
+ as = Nod(OAS2, nil, nil)
+ as.List = left
+ as.Rlist = right
+ as.Colas = 1
+ as.Lineno = lno
+ colasdefn(left, as)
+
+ // make the tree prettier; not necessary
+ if count(left) == 1 && count(right) == 1 {
+ as.Left = as.List.N
+ as.Right = as.Rlist.N
+ as.List = nil
+ as.Rlist = nil
+ as.Op = OAS
+ }
+
+ return as
+}
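+
+// For example (illustrative): for
+//
+//	a, b := f()
+//
+// colas builds an OAS2 node and colasdefn declares a and b as new
+// variables; if every name on the left is already declared in the same
+// block, it reports "no new variables on left side of :=". A single
+// assignment like x := 1 is simplified to a plain OAS node at the end.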
+
+/*
+ * declare the arguments in an
+ * interface field declaration.
+ */
+func ifacedcl(n *Node) {
+ if n.Op != ODCLFIELD || n.Right == nil {
+ Fatal("ifacedcl")
+ }
+
+ if isblank(n.Left) {
+ Yyerror("methods must have a unique non-blank name")
+ }
+
+ dclcontext = PPARAM
+ markdcl()
+ Funcdepth++
+ n.Outer = Curfn
+ Curfn = n
+ funcargs(n.Right)
+
+ // funcbody is normally called after the parser has
+ // seen the body of a function but since an interface
+ // field declaration does not have a body, we must
+ // call it now to pop the current declaration context.
+ dclcontext = PAUTO
+
+ funcbody(n)
+}
+
+/*
+ * declare the function proper
+ * and declare the arguments.
+ * called in extern-declaration context
+ * returns in auto-declaration context.
+ */
+func funchdr(n *Node) {
+ // change the declaration context from extern to auto
+ if Funcdepth == 0 && dclcontext != PEXTERN {
+ Fatal("funchdr: dclcontext")
+ }
+
+ dclcontext = PAUTO
+ markdcl()
+ Funcdepth++
+
+ n.Outer = Curfn
+ Curfn = n
+
+ if n.Nname != nil {
+ funcargs(n.Nname.Ntype)
+ } else if n.Ntype != nil {
+ funcargs(n.Ntype)
+ } else {
+ funcargs2(n.Type)
+ }
+}
+
+func funcargs(nt *Node) {
+ var n *Node
+ var nn *Node
+ var l *NodeList
+ var gen int
+
+ if nt.Op != OTFUNC {
+ Fatal("funcargs %v", Oconv(int(nt.Op), 0))
+ }
+
+ // re-start the variable generation number
+ // we want to use small numbers for the return variables,
+ // so let them have the chunk starting at 1.
+ vargen = count(nt.Rlist)
+
+ // declare the receiver and in arguments.
+ // no n->defn because type checking of func header
+ // will not fill in the types until later
+ if nt.Left != nil {
+ n = nt.Left
+ if n.Op != ODCLFIELD {
+ Fatal("funcargs receiver %v", Oconv(int(n.Op), 0))
+ }
+ if n.Left != nil {
+ n.Left.Op = ONAME
+ n.Left.Ntype = n.Right
+ declare(n.Left, PPARAM)
+ if dclcontext == PAUTO {
+ vargen++
+ n.Left.Vargen = int32(vargen)
+ }
+ }
+ }
+
+ for l = nt.List; l != nil; l = l.Next {
+ n = l.N
+ if n.Op != ODCLFIELD {
+ Fatal("funcargs in %v", Oconv(int(n.Op), 0))
+ }
+ if n.Left != nil {
+ n.Left.Op = ONAME
+ n.Left.Ntype = n.Right
+ declare(n.Left, PPARAM)
+ if dclcontext == PAUTO {
+ vargen++
+ n.Left.Vargen = int32(vargen)
+ }
+ }
+ }
+
+ // declare the out arguments.
+ gen = count(nt.List)
+ var i int = 0
+ for l = nt.Rlist; l != nil; l = l.Next {
+ n = l.N
+
+ if n.Op != ODCLFIELD {
+ Fatal("funcargs out %v", Oconv(int(n.Op), 0))
+ }
+
+ if n.Left == nil {
+ // Name so that escape analysis can track it. ~r stands for 'result'.
+ namebuf = fmt.Sprintf("~r%d", gen)
+ gen++
+
+ n.Left = newname(Lookup(namebuf))
+ }
+
+ // TODO: n->left->missing = 1;
+ n.Left.Op = ONAME
+
+ if isblank(n.Left) {
+ // Give it a name so we can assign to it during return. ~b stands for 'blank'.
+ // The name must be different from ~r above because if you have
+ // func f() (_ int)
+ // func g() int
+ // f is allowed to use a plain 'return' with no arguments, while g is not.
+ // So the two cases must be distinguished.
+ // We do not record a pointer to the original node (n->orig).
+ // Having multiple names causes too much confusion in later passes.
+ nn = Nod(OXXX, nil, nil)
+
+ *nn = *n.Left
+ nn.Orig = nn
+ namebuf = fmt.Sprintf("~b%d", gen)
+ gen++
+ nn.Sym = Lookup(namebuf)
+ n.Left = nn
+ }
+
+ n.Left.Ntype = n.Right
+ declare(n.Left, PPARAMOUT)
+ if dclcontext == PAUTO {
+ i++
+ n.Left.Vargen = int32(i)
+ }
+ }
+}
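+
+// For example (illustrative): in
+//
+//	func f(x int) (int, _ int)
+//
+// gen starts at count(nt.List) = 1, so the unnamed first result is
+// declared as ~r1 and the blank second result as ~b2, giving escape
+// analysis and plain 'return' statements real names to work with.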
+
+/*
+ * Same as funcargs, except run over an already constructed TFUNC.
+ * This happens during import, where the hidden_fndcl rule has
+ * used functype directly to parse the function's type.
+ */
+func funcargs2(t *Type) {
+ var ft *Type
+ var n *Node
+
+ if t.Etype != TFUNC {
+ Fatal("funcargs2 %v", Tconv(t, 0))
+ }
+
+ if t.Thistuple != 0 {
+ for ft = getthisx(t).Type; ft != nil; ft = ft.Down {
+ if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
+ continue
+ }
+ n = ft.Nname // no need for newname(ft->nname->sym)
+ n.Type = ft.Type
+ declare(n, PPARAM)
+ }
+ }
+
+ if t.Intuple != 0 {
+ for ft = getinargx(t).Type; ft != nil; ft = ft.Down {
+ if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
+ continue
+ }
+ n = ft.Nname
+ n.Type = ft.Type
+ declare(n, PPARAM)
+ }
+ }
+
+ if t.Outtuple != 0 {
+ for ft = getoutargx(t).Type; ft != nil; ft = ft.Down {
+ if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
+ continue
+ }
+ n = ft.Nname
+ n.Type = ft.Type
+ declare(n, PPARAMOUT)
+ }
+ }
+}
+
+/*
+ * finish the body.
+ * called in auto-declaration context.
+ * returns in extern-declaration context.
+ */
+func funcbody(n *Node) {
+ // change the declaration context from auto to extern
+ if dclcontext != PAUTO {
+ Fatal("funcbody: dclcontext")
+ }
+ popdcl()
+ Funcdepth--
+ Curfn = n.Outer
+ n.Outer = nil
+ if Funcdepth == 0 {
+ dclcontext = PEXTERN
+ }
+}
+
+/*
+ * new type being defined with name s.
+ */
+func typedcl0(s *Sym) *Node {
+ var n *Node
+
+ n = newname(s)
+ n.Op = OTYPE
+ declare(n, dclcontext)
+ return n
+}
+
+/*
+ * node n, which was returned by typedcl0
+ * is being declared to have uncompiled type t.
+ * return the ODCLTYPE node to use.
+ */
+func typedcl1(n *Node, t *Node, local int) *Node {
+ n.Ntype = t
+ n.Local = uint8(local)
+ return Nod(ODCLTYPE, n, nil)
+}
+
+/*
+ * structs, functions, and methods.
+ * they don't belong here, but where do they belong?
+ */
+func checkembeddedtype(t *Type) {
+ if t == nil {
+ return
+ }
+
+ if t.Sym == nil && Isptr[t.Etype] != 0 {
+ t = t.Type
+ if t.Etype == TINTER {
+ Yyerror("embedded type cannot be a pointer to interface")
+ }
+ }
+
+ if Isptr[t.Etype] != 0 {
+ Yyerror("embedded type cannot be a pointer")
+ } else if t.Etype == TFORW && t.Embedlineno == 0 {
+ t.Embedlineno = lineno
+ }
+}
+
+func structfield(n *Node) *Type {
+ var f *Type
+ var lno int
+
+ lno = int(lineno)
+ lineno = n.Lineno
+
+ if n.Op != ODCLFIELD {
+ Fatal("structfield: oops %v\n", Nconv(n, 0))
+ }
+
+ f = typ(TFIELD)
+ f.Isddd = n.Isddd
+
+ if n.Right != nil {
+ typecheck(&n.Right, Etype)
+ n.Type = n.Right.Type
+ if n.Left != nil {
+ n.Left.Type = n.Type
+ }
+ if n.Embedded != 0 {
+ checkembeddedtype(n.Type)
+ }
+ }
+
+ n.Right = nil
+
+ f.Type = n.Type
+ if f.Type == nil {
+ f.Broke = 1
+ }
+
+ switch n.Val.Ctype {
+ case CTSTR:
+ f.Note = n.Val.U.Sval
+
+ default:
+ Yyerror("field annotation must be string")
+ fallthrough
+
+ case CTxxx:
+ f.Note = nil
+ }
+
+ if n.Left != nil && n.Left.Op == ONAME {
+ f.Nname = n.Left
+ f.Embedded = n.Embedded
+ f.Sym = f.Nname.Sym
+ }
+
+ lineno = int32(lno)
+ return f
+}
+
+var uniqgen uint32
+
+func checkdupfields(t *Type, what string) {
+ var lno int
+
+ lno = int(lineno)
+
+ for ; t != nil; t = t.Down {
+ if t.Sym != nil && t.Nname != nil && !isblank(t.Nname) {
+ if t.Sym.Uniqgen == uniqgen {
+ lineno = t.Nname.Lineno
+ Yyerror("duplicate %s %s", what, t.Sym.Name)
+ } else {
+ t.Sym.Uniqgen = uniqgen
+ }
+ }
+ }
+
+ lineno = int32(lno)
+}
+
+/*
+ * convert a parsed id/type list into
+ * a type for struct/interface/arglist
+ */
+func tostruct(l *NodeList) *Type {
+ var t *Type
+ var f *Type
+ var tp **Type
+ t = typ(TSTRUCT)
+
+ for tp = &t.Type; l != nil; l = l.Next {
+ f = structfield(l.N)
+
+ *tp = f
+ tp = &f.Down
+ }
+
+ for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+ if f.Broke != 0 {
+ t.Broke = 1
+ }
+ }
+
+ uniqgen++
+ checkdupfields(t.Type, "field")
+
+ if t.Broke == 0 {
+ checkwidth(t)
+ }
+
+ return t
+}
+
+func tofunargs(l *NodeList) *Type {
+ var t *Type
+ var f *Type
+ var tp **Type
+
+ t = typ(TSTRUCT)
+ t.Funarg = 1
+
+ for tp = &t.Type; l != nil; l = l.Next {
+ f = structfield(l.N)
+ f.Funarg = 1
+
+ // esc.c needs to find f given a PPARAM to add the tag.
+ if l.N.Left != nil && l.N.Left.Class == PPARAM {
+ l.N.Left.Paramfld = f
+ }
+
+ *tp = f
+ tp = &f.Down
+ }
+
+ for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+ if f.Broke != 0 {
+ t.Broke = 1
+ }
+ }
+
+ return t
+}
+
+func interfacefield(n *Node) *Type {
+ var f *Type
+ var lno int
+
+ lno = int(lineno)
+ lineno = n.Lineno
+
+ if n.Op != ODCLFIELD {
+ Fatal("interfacefield: oops %v\n", Nconv(n, 0))
+ }
+
+ if n.Val.Ctype != CTxxx {
+ Yyerror("interface method cannot have annotation")
+ }
+
+ f = typ(TFIELD)
+ f.Isddd = n.Isddd
+
+ if n.Right != nil {
+ if n.Left != nil {
+ // queue resolution of method type for later.
+ // right now all we need is the name list.
+ // avoids cycles for recursive interface types.
+ n.Type = typ(TINTERMETH)
+
+ n.Type.Nname = n.Right
+ n.Left.Type = n.Type
+ queuemethod(n)
+
+ if n.Left.Op == ONAME {
+ f.Nname = n.Left
+ f.Embedded = n.Embedded
+ f.Sym = f.Nname.Sym
+ }
+ } else {
+ typecheck(&n.Right, Etype)
+ n.Type = n.Right.Type
+
+ if n.Embedded != 0 {
+ checkembeddedtype(n.Type)
+ }
+
+ if n.Type != nil {
+ switch n.Type.Etype {
+ case TINTER:
+ break
+
+ case TFORW:
+ Yyerror("interface type loop involving %v", Tconv(n.Type, 0))
+ f.Broke = 1
+
+ default:
+ Yyerror("interface contains embedded non-interface %v", Tconv(n.Type, 0))
+ f.Broke = 1
+ }
+ }
+ }
+ }
+
+ n.Right = nil
+
+ f.Type = n.Type
+ if f.Type == nil {
+ f.Broke = 1
+ }
+
+ lineno = int32(lno)
+ return f
+}
+
+func tointerface(l *NodeList) *Type {
+ var t *Type
+ var f *Type
+ var tp **Type
+ var t1 *Type
+
+ t = typ(TINTER)
+
+ tp = &t.Type
+ for ; l != nil; l = l.Next {
+ f = interfacefield(l.N)
+
+ if l.N.Left == nil && f.Type.Etype == TINTER {
+ // embedded interface, inline methods
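+ // For example (illustrative): in
+ //	type RW interface { Reader; Write() }
+ // each method of the embedded Reader is copied
+ // into RW's own field list by the loop below.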
+ for t1 = f.Type.Type; t1 != nil; t1 = t1.Down {
+ f = typ(TFIELD)
+ f.Type = t1.Type
+ f.Broke = t1.Broke
+ f.Sym = t1.Sym
+ if f.Sym != nil {
+ f.Nname = newname(f.Sym)
+ }
+ *tp = f
+ tp = &f.Down
+ }
+ } else {
+ *tp = f
+ tp = &f.Down
+ }
+ }
+
+ for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+ if f.Broke != 0 {
+ t.Broke = 1
+ }
+ }
+
+ uniqgen++
+ checkdupfields(t.Type, "method")
+ t = sortinter(t)
+ checkwidth(t)
+
+ return t
+}
+
+func embedded(s *Sym, pkg *Pkg) *Node {
+ var n *Node
+ var name string
+ const (
+ CenterDot = 0xB7
+ )
+ // Names sometimes have disambiguation junk
+ // appended after a center dot. Discard it when
+ // making the name for the embedded struct field.
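+ // For example (illustrative), a symbol named "T·1"
+ // yields the embedded field name "T".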
+ name = s.Name
+
+ if i := strings.Index(s.Name, string(CenterDot)); i >= 0 {
+ name = s.Name[:i]
+ }
+
+ if exportname(name) {
+ n = newname(Lookup(name))
+ } else if s.Pkg == builtinpkg {
+ // The name of embedded builtins belongs to pkg.
+ n = newname(Pkglookup(name, pkg))
+ } else {
+ n = newname(Pkglookup(name, s.Pkg))
+ }
+ n = Nod(ODCLFIELD, n, oldname(s))
+ n.Embedded = 1
+ return n
+}
+
+/*
+ * check that the list of declarations is either all anonymous or all named
+ */
+func findtype(l *NodeList) *Node {
+ for ; l != nil; l = l.Next {
+ if l.N.Op == OKEY {
+ return l.N.Right
+ }
+ }
+ return nil
+}
+
+func checkarglist(all *NodeList, input int) *NodeList {
+ var named int
+ var n *Node
+ var t *Node
+ var nextt *Node
+ var l *NodeList
+
+ named = 0
+ for l = all; l != nil; l = l.Next {
+ if l.N.Op == OKEY {
+ named = 1
+ break
+ }
+ }
+
+ if named != 0 {
+ n = nil
+ for l = all; l != nil; l = l.Next {
+ n = l.N
+ if n.Op != OKEY && n.Sym == nil {
+ Yyerror("mixed named and unnamed function parameters")
+ break
+ }
+ }
+
+ if l == nil && n != nil && n.Op != OKEY {
+ Yyerror("final function parameter must have type")
+ }
+ }
+
+ nextt = nil
+ for l = all; l != nil; l = l.Next {
+ // can cache result from findtype to avoid
+ // quadratic behavior here, but unlikely to matter.
+ n = l.N
+
+ if named != 0 {
+ if n.Op == OKEY {
+ t = n.Right
+ n = n.Left
+ nextt = nil
+ } else {
+ if nextt == nil {
+ nextt = findtype(l)
+ }
+ t = nextt
+ }
+ } else {
+ t = n
+ n = nil
+ }
+
+ // During import, l->n->op is OKEY, but l->n->left->sym == S
+ // means it was a '?', not a lone type. This doesn't matter
+ // for the exported declarations, which are parsed by rules
+ // that don't use checkarglist, but it can happen for func
+ // literals in the inline bodies.
+ // TODO(rsc): this can go when typefmt case TFIELD in exportmode fmt.c prints _ instead of ?
+ if importpkg != nil && n.Sym == nil {
+ n = nil
+ }
+
+ if n != nil && n.Sym == nil {
+ t = n
+ n = nil
+ }
+
+ if n != nil {
+ n = newname(n.Sym)
+ }
+ n = Nod(ODCLFIELD, n, t)
+ if n.Right != nil && n.Right.Op == ODDD {
+ if input == 0 {
+ Yyerror("cannot use ... in output argument list")
+ } else if l.Next != nil {
+ Yyerror("can only use ... as final argument in list")
+ }
+ n.Right.Op = OTARRAY
+ n.Right.Right = n.Right.Left
+ n.Right.Left = nil
+ n.Isddd = 1
+ if n.Left != nil {
+ n.Left.Isddd = 1
+ }
+ }
+
+ l.N = n
+ }
+
+ return all
+}
+
+func fakethis() *Node {
+ return Nod(ODCLFIELD, nil, typenod(Ptrto(typ(TSTRUCT))))
+}
+
+/*
+ * Is this field a method on an interface?
+ * Those methods have an anonymous
+ * *struct{} as the receiver.
+ * (See fakethis above.)
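+ * For example (illustrative), a method type whose receiver is the
+ * anonymous *struct{} built by fakethis makes this return 1, while
+ * any named receiver type makes it return 0.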
+ */
+func isifacemethod(f *Type) int {
+ var rcvr *Type
+ var t *Type
+
+ rcvr = getthisx(f).Type
+ if rcvr.Sym != nil {
+ return 0
+ }
+ t = rcvr.Type
+ if Isptr[t.Etype] == 0 {
+ return 0
+ }
+ t = t.Type
+ if t.Sym != nil || t.Etype != TSTRUCT || t.Type != nil {
+ return 0
+ }
+ return 1
+}
+
+/*
+ * turn a parsed function declaration
+ * into a type
+ */
+func functype(this *Node, in *NodeList, out *NodeList) *Type {
+ var t *Type
+ var rcvr *NodeList
+ var s *Sym
+
+ t = typ(TFUNC)
+
+ rcvr = nil
+ if this != nil {
+ rcvr = list1(this)
+ }
+ t.Type = tofunargs(rcvr)
+ t.Type.Down = tofunargs(out)
+ t.Type.Down.Down = tofunargs(in)
+
+ uniqgen++
+ checkdupfields(t.Type.Type, "argument")
+ checkdupfields(t.Type.Down.Type, "argument")
+ checkdupfields(t.Type.Down.Down.Type, "argument")
+
+ if t.Type.Broke != 0 || t.Type.Down.Broke != 0 || t.Type.Down.Down.Broke != 0 {
+ t.Broke = 1
+ }
+
+ if this != nil {
+ t.Thistuple = 1
+ }
+ t.Outtuple = count(out)
+ t.Intuple = count(in)
+ t.Outnamed = 0
+ if t.Outtuple > 0 && out.N.Left != nil && out.N.Left.Orig != nil {
+ s = out.N.Left.Orig.Sym
+ if s != nil && (s.Name[0] != '~' || s.Name[1] != 'r') { // ~r%d is the name invented for an unnamed result
+ t.Outnamed = 1
+ }
+ }
+
+ return t
+}
+
+var methodsym_toppkg *Pkg
+
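+/*
+ * methodsym builds the link symbol for method nsym on type t0.
+ * For example (illustrative), method M on named type T becomes "T.M"
+ * and on an unnamed *T becomes "(*T).M"; unexported names from
+ * another package get that package's prefix spliced in, and iface != 0
+ * may append the "·i" suffix for receivers smaller than a pointer word.
+ */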
+func methodsym(nsym *Sym, t0 *Type, iface int) *Sym {
+ var s *Sym
+ var p string
+ var t *Type
+ var suffix string
+ var spkg *Pkg
+
+ t = t0
+ if t == nil {
+ goto bad
+ }
+ s = t.Sym
+ if s == nil && Isptr[t.Etype] != 0 {
+ t = t.Type
+ if t == nil {
+ goto bad
+ }
+ s = t.Sym
+ }
+
+ spkg = nil
+ if s != nil {
+ spkg = s.Pkg
+ }
+
+ // if t0 == *t and t0 has a sym,
+ // we want to see *t, not t0, in the method name.
+ if t != t0 && t0.Sym != nil {
+ t0 = Ptrto(t)
+ }
+
+ suffix = ""
+ if iface != 0 {
+ dowidth(t0)
+ if t0.Width < Types[Tptr].Width {
+ suffix = "·i"
+ }
+ }
+
+ if (spkg == nil || nsym.Pkg != spkg) && !exportname(nsym.Name) {
+ if t0.Sym == nil && Isptr[t0.Etype] != 0 {
+ p = fmt.Sprintf("(%v).%s.%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Pkg.Prefix, nsym.Name, suffix)
+ } else {
+ p = fmt.Sprintf("%v.%s.%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Pkg.Prefix, nsym.Name, suffix)
+ }
+ } else {
+ if t0.Sym == nil && Isptr[t0.Etype] != 0 {
+ p = fmt.Sprintf("(%v).%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Name, suffix)
+ } else {
+ p = fmt.Sprintf("%v.%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Name, suffix)
+ }
+ }
+
+ if spkg == nil {
+ if methodsym_toppkg == nil {
+ methodsym_toppkg = mkpkg(newstrlit("go"))
+ }
+ spkg = methodsym_toppkg
+ }
+
+ s = Pkglookup(p, spkg)
+
+ return s
+
+bad:
+ Yyerror("illegal receiver type: %v", Tconv(t0, 0))
+ return nil
+}
+
+func methodname(n *Node, t *Type) *Node {
+ var s *Sym
+
+ s = methodsym(n.Sym, t, 0)
+ if s == nil {
+ return n
+ }
+ return newname(s)
+}
+
+func methodname1(n *Node, t *Node) *Node {
+ var star string
+ var p string
+
+ star = ""
+ if t.Op == OIND {
+ star = "*"
+ t = t.Left
+ }
+
+ if t.Sym == nil || isblank(n) {
+ return newname(n.Sym)
+ }
+
+ if star != "" {
+ p = fmt.Sprintf("(%s%v).%v", star, Sconv(t.Sym, 0), Sconv(n.Sym, 0))
+ } else {
+ p = fmt.Sprintf("%v.%v", Sconv(t.Sym, 0), Sconv(n.Sym, 0))
+ }
+
+ if exportname(t.Sym.Name) {
+ n = newname(Lookup(p))
+ } else {
+ n = newname(Pkglookup(p, t.Sym.Pkg))
+ }
+
+ return n
+}
+
+/*
+ * add a method, declared as a function,
+ * n is fieldname, pa is base type, t is function type
+ */
+func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
+ var f *Type
+ var d *Type
+ var pa *Type
+ var n *Node
+
+ // get field sym
+ if sf == nil {
+ Fatal("no method symbol")
+ }
+
+ // get parent type sym
+ pa = getthisx(t).Type // ptr to this structure
+ if pa == nil {
+ Yyerror("missing receiver")
+ return
+ }
+
+ pa = pa.Type
+ f = methtype(pa, 1)
+ if f == nil {
+ t = pa
+ if t == nil { // rely on typecheck having complained before
+ return
+ }
+ if Isptr[t.Etype] != 0 {
+ if t.Sym != nil {
+ Yyerror("invalid receiver type %v (%v is a pointer type)", Tconv(pa, 0), Tconv(t, 0))
+ return
+ }
+
+ t = t.Type
+ }
+
+ if t.Broke != 0 { // rely on typecheck having complained before
+ return
+ }
+ if t.Sym == nil {
+ Yyerror("invalid receiver type %v (%v is an unnamed type)", Tconv(pa, 0), Tconv(t, 0))
+ return
+ }
+
+ if Isptr[t.Etype] != 0 {
+ Yyerror("invalid receiver type %v (%v is a pointer type)", Tconv(pa, 0), Tconv(t, 0))
+ return
+ }
+
+ if t.Etype == TINTER {
+ Yyerror("invalid receiver type %v (%v is an interface type)", Tconv(pa, 0), Tconv(t, 0))
+ return
+ }
+
+ // Should have picked off all the reasons above,
+ // but just in case, fall back to generic error.
+ Yyerror("invalid receiver type %v (%v / %v)", Tconv(pa, 0), Tconv(pa, obj.FmtLong), Tconv(t, obj.FmtLong))
+
+ return
+ }
+
+ pa = f
+ if pa.Etype == TSTRUCT {
+ for f = pa.Type; f != nil; f = f.Down {
+ if f.Sym == sf {
+ Yyerror("type %v has both field and method named %v", Tconv(pa, 0), Sconv(sf, 0))
+ return
+ }
+ }
+ }
+
+ if local && pa.Local == 0 {
+ // defining method on non-local type.
+ Yyerror("cannot define new methods on non-local type %v", Tconv(pa, 0))
+
+ return
+ }
+
+ n = Nod(ODCLFIELD, newname(sf), nil)
+ n.Type = t
+
+ d = nil // last found
+ for f = pa.Method; f != nil; f = f.Down {
+ d = f
+ if f.Etype != TFIELD {
+ Fatal("addmethod: not TFIELD: %v", Tconv(f, obj.FmtLong))
+ }
+ if sf.Name != f.Sym.Name {
+ continue
+ }
+ if !Eqtype(t, f.Type) {
+ Yyerror("method redeclared: %v.%v\n\t%v\n\t%v", Tconv(pa, 0), Sconv(sf, 0), Tconv(f.Type, 0), Tconv(t, 0))
+ }
+ return
+ }
+
+ f = structfield(n)
+ f.Nointerface = uint8(bool2int(nointerface))
+
+ // during import unexported method names should be in the type's package
+ if importpkg != nil && f.Sym != nil && !exportname(f.Sym.Name) && f.Sym.Pkg != structpkg {
+ Fatal("imported method name %v in wrong package %s\n", Sconv(f.Sym, obj.FmtSign), structpkg.Name)
+ }
+
+ if d == nil {
+ pa.Method = f
+ } else {
+ d.Down = f
+ }
+ return
+}
+
+func funccompile(n *Node) {
+ Stksize = BADWIDTH
+ Maxarg = 0
+
+ if n.Type == nil {
+ if nerrors == 0 {
+ Fatal("funccompile missing type")
+ }
+ return
+ }
+
+ // assign parameter offsets
+ checkwidth(n.Type)
+
+ if Curfn != nil {
+ Fatal("funccompile %v inside %v", Sconv(n.Nname.Sym, 0), Sconv(Curfn.Nname.Sym, 0))
+ }
+
+ Stksize = 0
+ dclcontext = PAUTO
+ Funcdepth = n.Funcdepth + 1
+ compile(n)
+ Curfn = nil
+ Funcdepth = 0
+ dclcontext = PEXTERN
+}
+
+func funcsym(s *Sym) *Sym {
+ var p string
+ var s1 *Sym
+
+ p = fmt.Sprintf("%s·f", s.Name)
+ s1 = Pkglookup(p, s.Pkg)
+
+ if s1.Def == nil {
+ s1.Def = newname(s1)
+ s1.Def.Shortname = newname(s)
+ funcsyms = list(funcsyms, s1.Def)
+ }
+
+ return s1
+}
diff --git a/src/cmd/internal/gc/esc.go b/src/cmd/internal/gc/esc.go
new file mode 100644
index 0000000000..35543e1848
--- /dev/null
+++ b/src/cmd/internal/gc/esc.go
@@ -0,0 +1,1437 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+)
+
+// Escape analysis.
+
+// Run analysis on minimal sets of mutually recursive functions
+// or single non-recursive functions, bottom up.
+//
+// Finding these sets is finding strongly connected components
+// in the static call graph. The algorithm for doing that is taken
+// from Sedgewick, Algorithms, Second Edition, p. 482, with two
+// adaptations.
+//
+// First, a hidden closure function (n->curfn != N) cannot be the
+// root of a connected component. Refusing to use it as a root
+// forces it into the component of the function in which it appears.
+// The analysis assumes that closures and the functions in which they
+// appear are analyzed together, so that the aliasing between their
+// variables can be modeled more precisely.
+//
+// Second, each function becomes two virtual nodes in the graph,
+// with numbers n and n+1. We record the function's node number as n
+// but search from node n+1. If the search tells us that the component
+// number (min) is n+1, we know that this is a trivial component: one function
+// plus its closures. If the search tells us that the component number is
+// n, then there was a path from node n+1 back to node n, meaning that
+// the function set is mutually recursive. The escape analysis can be
+// more precise when analyzing a single non-recursive function than
+// when analyzing a set of mutually recursive functions.
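+//
+// For example (illustrative): given the mutually recursive
+//
+//	func f(p *int) { g(p) }
+//	func g(p *int) { f(p) }
+//
+// visit(f) records walkgen n for f and searches with min = n+1; the
+// cycle f -> g -> f brings min back down to n, so f and g form one
+// recursive component and are analyzed together.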
+
+var stack *NodeList
+
+var visitgen uint32
+
+const (
+ EscFuncUnknown = iota
+ EscFuncPlanned
+ EscFuncStarted
+ EscFuncTagged
+)
+
+func escapes(all *NodeList) {
+ var l *NodeList
+
+ for l = all; l != nil; l = l.Next {
+ l.N.Walkgen = 0
+ }
+
+ visitgen = 0
+ for l = all; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC && l.N.Curfn == nil {
+ visit(l.N)
+ }
+ }
+
+ for l = all; l != nil; l = l.Next {
+ l.N.Walkgen = 0
+ }
+}
+
+func visit(n *Node) uint32 {
+ var min uint32
+ var recursive uint32
+ var l *NodeList
+ var block *NodeList
+
+ if n.Walkgen > 0 {
+ // already visited
+ return n.Walkgen
+ }
+
+ visitgen++
+ n.Walkgen = visitgen
+ visitgen++
+ min = visitgen
+
+ l = new(NodeList)
+ l.Next = stack
+ l.N = n
+ stack = l
+ min = visitcodelist(n.Nbody, min)
+ if (min == n.Walkgen || min == n.Walkgen+1) && n.Curfn == nil {
+ // This node is the root of a strongly connected component.
+
+ // The original min passed to visitcodelist was n->walkgen+1.
+ // If visitcodelist found its way back to n->walkgen, then this
+ // block is a set of mutually recursive functions.
+ // Otherwise it's just a lone function that does not recurse.
+ recursive = uint32(bool2int(min == n.Walkgen))
+
+ // Remove connected component from stack.
+ // Mark walkgen so that future visits return a large number
+ // so as not to affect the caller's min.
+ block = stack
+
+ for l = stack; l.N != n; l = l.Next {
+ l.N.Walkgen = ^uint32(0)
+ }
+ n.Walkgen = ^uint32(0)
+ stack = l.Next
+ l.Next = nil
+
+ // Run escape analysis on this set of functions.
+ analyze(block, int(recursive))
+ }
+
+ return min
+}
+
+func visitcodelist(l *NodeList, min uint32) uint32 {
+ for ; l != nil; l = l.Next {
+ min = visitcode(l.N, min)
+ }
+ return min
+}
+
+func visitcode(n *Node, min uint32) uint32 {
+ var fn *Node
+ var m uint32
+
+ if n == nil {
+ return min
+ }
+
+ min = visitcodelist(n.Ninit, min)
+ min = visitcode(n.Left, min)
+ min = visitcode(n.Right, min)
+ min = visitcodelist(n.List, min)
+ min = visitcode(n.Ntest, min)
+ min = visitcode(n.Nincr, min)
+ min = visitcodelist(n.Nbody, min)
+ min = visitcodelist(n.Nelse, min)
+ min = visitcodelist(n.Rlist, min)
+
+ if n.Op == OCALLFUNC || n.Op == OCALLMETH {
+ fn = n.Left
+ if n.Op == OCALLMETH {
+ fn = n.Left.Right.Sym.Def
+ }
+ if fn != nil && fn.Op == ONAME && fn.Class == PFUNC && fn.Defn != nil {
+ m = visit(fn.Defn)
+ if m < min {
+ min = m
+ }
+ }
+ }
+
+ if n.Op == OCLOSURE {
+ m = visit(n.Closure)
+ if m < min {
+ min = m
+ }
+ }
+
+ return min
+}
+
+// An escape analysis pass for a set of functions.
+//
+// First escfunc, esc and escassign recurse over the ast of each
+// function to dig out flow(dst,src) edges between any
+// pointer-containing nodes and store them in dst->escflowsrc. For
+// variables assigned to a variable in an outer scope or used as a
+// return value, they store a flow(theSink, src) edge to a fake node
+// 'the Sink'. For variables referenced in closures, an edge
+// flow(closure, &var) is recorded and the flow of a closure itself to
+// an outer scope is tracked the same way as other variables.
+//
+// Then escflood walks the graph starting at theSink and tags as
+// escaping every variable it can reach through an address-of (&)
+// node, and as leaking every function parameter it can reach.
+//
+// If a value's address is taken but the address does not escape,
+// then the value can stay on the stack. If the value new(T) does
+// not escape, then new(T) can be rewritten into a stack allocation.
+// The same is true of slice literals.
+//
+// If optimizations are disabled (-N), this code is not used.
+// Instead, the compiler assumes that any value whose address
+// is taken without being immediately dereferenced
+// needs to be moved to the heap, and new(T) and slice
+// literals are always real allocations.
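+//
+// For example (illustrative), in
+//
+//	func f() *int { x := 0; return &x }
+//
+// escassign ties &x to f's result parameter; escflood then reaches
+// the OADDR node at level 0 and marks x as escaping, so x is
+// allocated on the heap.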
+
+type EscState struct {
+ theSink Node
+ funcParam Node
+ dsts *NodeList
+ loopdepth int
+ pdepth int
+ dstcount int
+ edgecount int
+ noesc *NodeList
+ recursive int
+}
+
+var tags [16]*Strlit
+
+func mktag(mask int) *Strlit {
+ var s *Strlit
+ var buf string
+
+ switch mask & EscMask {
+ case EscNone,
+ EscReturn:
+ break
+
+ default:
+ Fatal("escape mktag")
+ }
+
+ mask >>= EscBits
+
+ if mask < len(tags) && tags[mask] != nil {
+ return tags[mask]
+ }
+
+ buf = fmt.Sprintf("esc:0x%x", mask)
+ s = newstrlit(buf)
+ if mask < len(tags) {
+ tags[mask] = s
+ }
+ return s
+}
+
+func parsetag(note *Strlit) int {
+ var em int
+
+ if note == nil {
+ return EscUnknown
+ }
+ if !strings.HasPrefix(note.S, "esc:") {
+ return EscUnknown
+ }
+ em = atoi(note.S[4:])
+ if em == 0 {
+ return EscNone
+ }
+ return EscReturn | em<<EscBits
+}
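+
+// Round trip of the encoding above (assuming the Esc* constant layout
+// defined elsewhere in gc): mktag(EscNone) produces the note
+// "esc:0x0", which parsetag maps back to EscNone; a missing note
+// (parsetag(nil)) means EscUnknown, an untagged and therefore unsafe
+// parameter.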
+
+func analyze(all *NodeList, recursive int) {
+ var l *NodeList
+ var es EscState
+ var e *EscState
+
+ es = EscState{}
+ e = &es
+ e.theSink.Op = ONAME
+ e.theSink.Orig = &e.theSink
+ e.theSink.Class = PEXTERN
+ e.theSink.Sym = Lookup(".sink")
+ e.theSink.Escloopdepth = -1
+ e.recursive = recursive
+
+ e.funcParam.Op = ONAME
+ e.funcParam.Orig = &e.funcParam
+ e.funcParam.Class = PAUTO
+ e.funcParam.Sym = Lookup(".param")
+ e.funcParam.Escloopdepth = 10000000
+
+ for l = all; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC {
+ l.N.Esc = EscFuncPlanned
+ }
+ }
+
+ // flow-analyze functions
+ for l = all; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC {
+ escfunc(e, l.N)
+ }
+ }
+
+ // print("escapes: %d e->dsts, %d edges\n", e->dstcount, e->edgecount);
+
+ // visit the upstream of each dst, mark address nodes with
+ // addrescapes, mark parameters unsafe
+ for l = e.dsts; l != nil; l = l.Next {
+ escflood(e, l.N)
+ }
+
+ // for all top level functions, tag the typenodes corresponding to the param nodes
+ for l = all; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC {
+ esctag(e, l.N)
+ }
+ }
+
+ if Debug['m'] != 0 {
+ for l = e.noesc; l != nil; l = l.Next {
+ if l.N.Esc == EscNone {
+ var tmp *Sym
+ if l.N.Curfn != nil && l.N.Curfn.Nname != nil {
+ tmp = l.N.Curfn.Nname.Sym
+ } else {
+ tmp = nil
+ }
+ Warnl(int(l.N.Lineno), "%v %v does not escape", Sconv(tmp, 0), Nconv(l.N, obj.FmtShort))
+ }
+ }
+ }
+}
+
+func escfunc(e *EscState, func_ *Node) {
+ var savefn *Node
+ var ll *NodeList
+ var saveld int
+
+ // print("escfunc %N %s\n", func->nname, e->recursive?"(recursive)":"");
+
+ if func_.Esc != EscFuncPlanned {
+ Fatal("repeat escfunc %v", Nconv(func_.Nname, 0))
+ }
+ func_.Esc = EscFuncStarted
+
+ saveld = e.loopdepth
+ e.loopdepth = 1
+ savefn = Curfn
+ Curfn = func_
+
+ for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ if ll.N.Op != ONAME {
+ continue
+ }
+ switch ll.N.Class {
+ // out params are in a loopdepth between the sink and all local variables
+ case PPARAMOUT:
+ ll.N.Escloopdepth = 0
+
+ case PPARAM:
+ ll.N.Escloopdepth = 1
+ if ll.N.Type != nil && !haspointers(ll.N.Type) {
+ break
+ }
+ if Curfn.Nbody == nil && !Curfn.Noescape {
+ ll.N.Esc = EscHeap
+ } else {
+ ll.N.Esc = EscNone // prime for escflood later
+ }
+ e.noesc = list(e.noesc, ll.N)
+ }
+ }
+
+ // in a mutually recursive group we lose track of the return values
+ if e.recursive != 0 {
+ for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ if ll.N.Op == ONAME && ll.N.Class == PPARAMOUT {
+ escflows(e, &e.theSink, ll.N)
+ }
+ }
+ }
+
+ escloopdepthlist(e, Curfn.Nbody)
+ esclist(e, Curfn.Nbody, Curfn)
+ Curfn = savefn
+ e.loopdepth = saveld
+}
+
+// Mark labels that have no backjumps to them as not increasing e->loopdepth.
+// Walk hasn't generated (goto|label)->left->sym->label yet, so we'll cheat
+// and set it to one of the following two. Then in esc we'll clear it again.
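+//
+// For example (illustrative), in
+//
+//	L:
+//		...
+//		goto L
+//
+// the OLABEL first marks L as &nonlooping; the backward goto then
+// finds that mark and flips it to &looping, so esc treats code under
+// L as being inside a loop. A purely forward goto leaves its label
+// nonlooping.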
+var looping Label
+
+var nonlooping Label
+
+func escloopdepthlist(e *EscState, l *NodeList) {
+ for ; l != nil; l = l.Next {
+ escloopdepth(e, l.N)
+ }
+}
+
+func escloopdepth(e *EscState, n *Node) {
+ if n == nil {
+ return
+ }
+
+ escloopdepthlist(e, n.Ninit)
+
+ switch n.Op {
+ case OLABEL:
+ if n.Left == nil || n.Left.Sym == nil {
+ Fatal("esc:label without label: %v", Nconv(n, obj.FmtSign))
+ }
+
+ // Walk will complain about this label being already defined, but that's not until
+ // after escape analysis. in the future, maybe pull label & goto analysis out of walk and put before esc
+ // if(n->left->sym->label != nil)
+ // fatal("escape analysis messed up analyzing label: %+N", n);
+ n.Left.Sym.Label = &nonlooping
+
+ case OGOTO:
+ if n.Left == nil || n.Left.Sym == nil {
+ Fatal("esc:goto without label: %v", Nconv(n, obj.FmtSign))
+ }
+
+ // If we come past one that's uninitialized, this must be a (harmless) forward jump
+ // but if it's set to nonlooping the label must have preceded this goto.
+ if n.Left.Sym.Label == &nonlooping {
+ n.Left.Sym.Label = &looping
+ }
+ }
+
+ escloopdepth(e, n.Left)
+ escloopdepth(e, n.Right)
+ escloopdepthlist(e, n.List)
+ escloopdepth(e, n.Ntest)
+ escloopdepth(e, n.Nincr)
+ escloopdepthlist(e, n.Nbody)
+ escloopdepthlist(e, n.Nelse)
+ escloopdepthlist(e, n.Rlist)
+}
+
+func esclist(e *EscState, l *NodeList, up *Node) {
+ for ; l != nil; l = l.Next {
+ esc(e, l.N, up)
+ }
+}
+
+func esc(e *EscState, n *Node, up *Node) {
+ var lno int
+ var ll *NodeList
+ var lr *NodeList
+ var a *Node
+ var v *Node
+
+ if n == nil {
+ return
+ }
+
+ lno = int(setlineno(n))
+
+ // ninit logically runs at a different loopdepth than the rest of the for loop.
+ esclist(e, n.Ninit, n)
+
+ if n.Op == OFOR || n.Op == ORANGE {
+ e.loopdepth++
+ }
+
+ // type switch variables have no ODCL.
+ // process type switch as declaration.
+ // must happen before processing of switch body,
+ // so before recursion.
+ if n.Op == OSWITCH && n.Ntest != nil && n.Ntest.Op == OTYPESW {
+ for ll = n.List; ll != nil; ll = ll.Next { // cases
+
+ // ll->n->nname is the variable per case
+ if ll.N.Nname != nil {
+ ll.N.Nname.Escloopdepth = e.loopdepth
+ }
+ }
+ }
+
+ esc(e, n.Left, n)
+ esc(e, n.Right, n)
+ esc(e, n.Ntest, n)
+ esc(e, n.Nincr, n)
+ esclist(e, n.Nbody, n)
+ esclist(e, n.Nelse, n)
+ esclist(e, n.List, n)
+ esclist(e, n.Rlist, n)
+
+ if n.Op == OFOR || n.Op == ORANGE {
+ e.loopdepth--
+ }
+
+ if Debug['m'] > 1 {
+ var tmp *Sym
+ if Curfn != nil && Curfn.Nname != nil {
+ tmp = Curfn.Nname.Sym
+ } else {
+ tmp = nil
+ }
+ fmt.Printf("%v:[%d] %v esc: %v\n", Ctxt.Line(int(lineno)), e.loopdepth, Sconv(tmp, 0), Nconv(n, 0))
+ }
+
+ switch n.Op {
+ // Record loop depth at declaration.
+ case ODCL:
+ if n.Left != nil {
+ n.Left.Escloopdepth = e.loopdepth
+ }
+
+ case OLABEL:
+ if n.Left.Sym.Label == &nonlooping {
+ if Debug['m'] > 1 {
+ fmt.Printf("%v:%v non-looping label\n", Ctxt.Line(int(lineno)), Nconv(n, 0))
+ }
+ } else if n.Left.Sym.Label == &looping {
+ if Debug['m'] > 1 {
+ fmt.Printf("%v: %v looping label\n", Ctxt.Line(int(lineno)), Nconv(n, 0))
+ }
+ e.loopdepth++
+ }
+
+ // See case OLABEL in escloopdepth above
+ // else if(n->left->sym->label == nil)
+ // fatal("escape analysis missed or messed up a label: %+N", n);
+
+ n.Left.Sym.Label = nil
+
+ // Everything but fixed array is a dereference.
+ case ORANGE:
+ if Isfixedarray(n.Type) != 0 && n.List != nil && n.List.Next != nil {
+ escassign(e, n.List.Next.N, n.Right)
+ }
+
+ case OSWITCH:
+ if n.Ntest != nil && n.Ntest.Op == OTYPESW {
+ for ll = n.List; ll != nil; ll = ll.Next { // cases
+
+ // ntest->right is the argument of the .(type),
+ // ll->n->nname is the variable per case
+ escassign(e, ll.N.Nname, n.Ntest.Right)
+ }
+ }
+
+ // Filter out the following special case.
+ //
+ // func (b *Buffer) Foo() {
+ // n, m := ...
+ // b.buf = b.buf[n:m]
+ // }
+ //
+ // This assignment is a no-op for escape analysis,
+ // it does not store any new pointers into b that were not already there.
+ // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
+ case OAS,
+ OASOP:
+ if (n.Left.Op == OIND || n.Left.Op == ODOTPTR) && n.Left.Left.Op == ONAME && // dst is ONAME dereference
+ (n.Right.Op == OSLICE || n.Right.Op == OSLICE3 || n.Right.Op == OSLICESTR) && // src is slice operation
+ (n.Right.Left.Op == OIND || n.Right.Left.Op == ODOTPTR) && n.Right.Left.Left.Op == ONAME && // slice is applied to ONAME dereference
+ n.Left.Left == n.Right.Left.Left { // dst and src reference the same base ONAME
+
+ // Here we also assume that the statement will not contain calls,
+ // that is, that order will move any calls to init.
+ // Otherwise base ONAME value could change between the moments
+ // when we evaluate it for dst and for src.
+ //
+ // Note, this optimization does not apply to OSLICEARR,
+ // because it does introduce a new pointer into b that was not already there
+ // (pointer to b itself). After such assignment, if b contents escape,
+ // b escapes as well. If we ignore such OSLICEARR, we will conclude
+ // that b does not escape when b contents do.
+ if Debug['m'] != 0 {
+ var tmp *Sym
+ if n.Curfn != nil && n.Curfn.Nname != nil {
+ tmp = n.Curfn.Nname.Sym
+ } else {
+ tmp = nil
+ }
+ Warnl(int(n.Lineno), "%v ignoring self-assignment to %v", Sconv(tmp, 0), Nconv(n.Left, obj.FmtShort))
+ }
+
+ break
+ }
+
+ escassign(e, n.Left, n.Right)
+
+ case OAS2: // x,y = a,b
+ if count(n.List) == count(n.Rlist) {
+ ll = n.List
+ lr = n.Rlist
+ for ; ll != nil; ll, lr = ll.Next, lr.Next {
+ escassign(e, ll.N, lr.N)
+ }
+ }
+
+ case OAS2RECV, // v, ok = <-ch
+ OAS2MAPR, // v, ok = m[k]
+ OAS2DOTTYPE: // v, ok = x.(type)
+ escassign(e, n.List.N, n.Rlist.N)
+
+ case OSEND: // ch <- x
+ escassign(e, &e.theSink, n.Right)
+
+ case ODEFER:
+ if e.loopdepth == 1 { // top level
+ break
+ }
+ fallthrough
+
+ // go f(x) - f and x escape
+ // arguments leak out of scope
+ // TODO: leak to a dummy node instead
+ case OPROC:
+ escassign(e, &e.theSink, n.Left.Left)
+
+ escassign(e, &e.theSink, n.Left.Right) // ODDDARG for call
+ for ll = n.Left.List; ll != nil; ll = ll.Next {
+ escassign(e, &e.theSink, ll.N)
+ }
+
+ case OCALLMETH,
+ OCALLFUNC,
+ OCALLINTER:
+ esccall(e, n, up)
+
+ // esccall already done on n->rlist->n. tie its escretval to n->list
+ case OAS2FUNC: // x,y = f()
+ lr = n.Rlist.N.Escretval
+
+ for ll = n.List; lr != nil && ll != nil; lr, ll = lr.Next, ll.Next {
+ escassign(e, ll.N, lr.N)
+ }
+ if lr != nil || ll != nil {
+ Fatal("esc oas2func")
+ }
+
+ case ORETURN:
+ ll = n.List
+ if count(n.List) == 1 && Curfn.Type.Outtuple > 1 {
+ // OAS2FUNC in disguise
+ // esccall already done on n->list->n
+ // tie n->list->n->escretval to curfn->dcl PPARAMOUT's
+ ll = n.List.N.Escretval
+ }
+
+ for lr = Curfn.Dcl; lr != nil && ll != nil; lr = lr.Next {
+ if lr.N.Op != ONAME || lr.N.Class != PPARAMOUT {
+ continue
+ }
+ escassign(e, lr.N, ll.N)
+ ll = ll.Next
+ }
+
+ if ll != nil {
+ Fatal("esc return list")
+ }
+
+ // Argument could leak through recover.
+ case OPANIC:
+ escassign(e, &e.theSink, n.Left)
+
+ case OAPPEND:
+ if n.Isddd == 0 {
+ for ll = n.List.Next; ll != nil; ll = ll.Next {
+ escassign(e, &e.theSink, ll.N) // lose track of assign to dereference
+ }
+ }
+
+ case OCONV,
+ OCONVNOP,
+ OCONVIFACE:
+ escassign(e, n, n.Left)
+
+ case OARRAYLIT:
+ if Isslice(n.Type) != 0 {
+ n.Esc = EscNone // until proven otherwise
+ e.noesc = list(e.noesc, n)
+ n.Escloopdepth = e.loopdepth
+
+ // Values make it to memory, lose track.
+ for ll = n.List; ll != nil; ll = ll.Next {
+ escassign(e, &e.theSink, ll.N.Right)
+ }
+ } else {
+ // Link values to array.
+ for ll = n.List; ll != nil; ll = ll.Next {
+ escassign(e, n, ll.N.Right)
+ }
+ }
+
+ // Link values to struct.
+ case OSTRUCTLIT:
+ for ll = n.List; ll != nil; ll = ll.Next {
+ escassign(e, n, ll.N.Right)
+ }
+
+ case OPTRLIT:
+ n.Esc = EscNone // until proven otherwise
+ e.noesc = list(e.noesc, n)
+ n.Escloopdepth = e.loopdepth
+
+ // Link OSTRUCTLIT to OPTRLIT; if OPTRLIT escapes, OSTRUCTLIT elements do too.
+ escassign(e, n, n.Left)
+
+ case OCALLPART:
+ n.Esc = EscNone // until proven otherwise
+ e.noesc = list(e.noesc, n)
+ n.Escloopdepth = e.loopdepth
+
+ // Contents make it to memory, lose track.
+ escassign(e, &e.theSink, n.Left)
+
+ case OMAPLIT:
+ n.Esc = EscNone // until proven otherwise
+ e.noesc = list(e.noesc, n)
+ n.Escloopdepth = e.loopdepth
+
+ // Keys and values make it to memory, lose track.
+ for ll = n.List; ll != nil; ll = ll.Next {
+ escassign(e, &e.theSink, ll.N.Left)
+ escassign(e, &e.theSink, ll.N.Right)
+ }
+
+ // Link addresses of captured variables to closure.
+ case OCLOSURE:
+ for ll = n.Cvars; ll != nil; ll = ll.Next {
+ v = ll.N
+ if v.Op == OXXX { // unnamed out argument; see dcl.c:/^funcargs
+ continue
+ }
+ a = v.Closure
+ if v.Byval == 0 {
+ a = Nod(OADDR, a, nil)
+ a.Lineno = v.Lineno
+ a.Escloopdepth = e.loopdepth
+ typecheck(&a, Erv)
+ }
+
+ escassign(e, n, a)
+ }
+ fallthrough
+
+ case OMAKECHAN,
+ OMAKEMAP,
+ OMAKESLICE,
+ ONEW,
+ OARRAYRUNESTR,
+ OARRAYBYTESTR,
+ OSTRARRAYRUNE,
+ OSTRARRAYBYTE,
+ ORUNESTR:
+ n.Escloopdepth = e.loopdepth
+
+ n.Esc = EscNone // until proven otherwise
+ e.noesc = list(e.noesc, n)
+
+ case OADDSTR:
+ n.Escloopdepth = e.loopdepth
+ n.Esc = EscNone // until proven otherwise
+ e.noesc = list(e.noesc, n)
+
+ // Arguments of OADDSTR do not escape.
+
+ case OADDR:
+ n.Esc = EscNone // until proven otherwise
+ e.noesc = list(e.noesc, n)
+
+ // current loop depth is an upper bound on actual loop depth
+ // of addressed value.
+ n.Escloopdepth = e.loopdepth
+
+ // for &x, use loop depth of x if known.
+ // it should always be known, but if not, be conservative
+ // and keep the current loop depth.
+ if n.Left.Op == ONAME {
+ switch n.Left.Class {
+ case PAUTO:
+ if n.Left.Escloopdepth != 0 {
+ n.Escloopdepth = n.Left.Escloopdepth
+ }
+
+ // PPARAM is loop depth 1 always.
+ // PPARAMOUT is loop depth 0 for writes
+ // but considered loop depth 1 for address-of,
+ // so that writing the address of one result
+ // to another (or the same) result makes the
+ // first result move to the heap.
+ case PPARAM,
+ PPARAMOUT:
+ n.Escloopdepth = 1
+ }
+ }
+ }
+
+ lineno = int32(lno)
+}
+
+// Assert that expr somehow gets assigned to dst, if non-nil. For
+// dst==nil, any name node in expr must still be marked as being
+// evaluated in curfn. For expr==nil, dst must still be examined for
+// evaluations inside it (e.g. *f(x) = y).
+func escassign(e *EscState, dst *Node, src *Node) {
+ var lno int
+ var ll *NodeList
+
+ if isblank(dst) || dst == nil || src == nil || src.Op == ONONAME || src.Op == OXXX {
+ return
+ }
+
+ if Debug['m'] > 1 {
+ var tmp *Sym
+ if Curfn != nil && Curfn.Nname != nil {
+ tmp = Curfn.Nname.Sym
+ } else {
+ tmp = nil
+ }
+ fmt.Printf("%v:[%d] %v escassign: %v(%v) = %v(%v)\n", Ctxt.Line(int(lineno)), e.loopdepth, Sconv(tmp, 0), Nconv(dst, obj.FmtShort), Jconv(dst, obj.FmtShort), Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort))
+ }
+
+ setlineno(dst)
+
+ // Analyze lhs of assignment.
+ // Replace dst with e->theSink if we can't track it.
+ switch dst.Op {
+ default:
+ Dump("dst", dst)
+ Fatal("escassign: unexpected dst")
+ fallthrough
+
+ case OARRAYLIT,
+ OCLOSURE,
+ OCONV,
+ OCONVIFACE,
+ OCONVNOP,
+ OMAPLIT,
+ OSTRUCTLIT,
+ OPTRLIT,
+ OCALLPART:
+ break
+
+ case ONAME:
+ if dst.Class == PEXTERN {
+ dst = &e.theSink
+ }
+
+ case ODOT: // treat "dst.x = src" as "dst = src"
+ escassign(e, dst.Left, src)
+
+ return
+
+ case OINDEX:
+ if Isfixedarray(dst.Left.Type) != 0 {
+ escassign(e, dst.Left, src)
+ return
+ }
+
+ dst = &e.theSink // lose track of dereference
+
+ case OIND,
+ ODOTPTR:
+ dst = &e.theSink // lose track of dereference
+
+ // lose track of key and value
+ case OINDEXMAP:
+ escassign(e, &e.theSink, dst.Right)
+
+ dst = &e.theSink
+ }
+
+ lno = int(setlineno(src))
+ e.pdepth++
+
+ switch src.Op {
+ case OADDR, // dst = &x
+ OIND, // dst = *x
+ ODOTPTR, // dst = (*x).f
+ ONAME,
+ OPARAM,
+ ODDDARG,
+ OPTRLIT,
+ OARRAYLIT,
+ OMAPLIT,
+ OSTRUCTLIT,
+ OMAKECHAN,
+ OMAKEMAP,
+ OMAKESLICE,
+ OARRAYRUNESTR,
+ OARRAYBYTESTR,
+ OSTRARRAYRUNE,
+ OSTRARRAYBYTE,
+ OADDSTR,
+ ONEW,
+ OCLOSURE,
+ OCALLPART,
+ ORUNESTR:
+ escflows(e, dst, src)
+
+ // Flowing multiple returns to a single dst happens when
+ // analyzing "go f(g())": here g() flows to sink (issue 4529).
+ case OCALLMETH,
+ OCALLFUNC,
+ OCALLINTER:
+ for ll = src.Escretval; ll != nil; ll = ll.Next {
+ escflows(e, dst, ll.N)
+ }
+
+ // A non-pointer escaping from a struct does not concern us.
+ case ODOT:
+ if src.Type != nil && !haspointers(src.Type) {
+ break
+ }
+ fallthrough
+
+ // Conversions, field access, slice all preserve the input value.
+ case OCONV,
+ OCONVIFACE,
+ OCONVNOP,
+ ODOTMETH,
+ // treat recv.meth as a value with recv in it, only happens in ODEFER and OPROC
+ // iface.method already leaks iface in esccall, no need to put in extra ODOTINTER edge here
+ ODOTTYPE,
+ ODOTTYPE2,
+ OSLICE,
+ OSLICE3,
+ OSLICEARR,
+ OSLICE3ARR,
+ OSLICESTR:
+ escassign(e, dst, src.Left)
+
+ // Append returns first argument.
+ case OAPPEND:
+ escassign(e, dst, src.List.N)
+
+ // Index of array preserves input value.
+ case OINDEX:
+ if Isfixedarray(src.Left.Type) != 0 {
+ escassign(e, dst, src.Left)
+ }
+
+ // Might be pointer arithmetic, in which case
+ // the operands flow into the result.
+ // TODO(rsc): Decide what the story is here. This is unsettling.
+ case OADD,
+ OSUB,
+ OOR,
+ OXOR,
+ OMUL,
+ ODIV,
+ OMOD,
+ OLSH,
+ ORSH,
+ OAND,
+ OANDNOT,
+ OPLUS,
+ OMINUS,
+ OCOM:
+ escassign(e, dst, src.Left)
+
+ escassign(e, dst, src.Right)
+ }
+
+ e.pdepth--
+ lineno = int32(lno)
+}
+
+func escassignfromtag(e *EscState, note *Strlit, dsts *NodeList, src *Node) int {
+ var em int
+ var em0 int
+
+ em = parsetag(note)
+
+ if em == EscUnknown {
+ escassign(e, &e.theSink, src)
+ return em
+ }
+
+ if em == EscNone {
+ return em
+ }
+
+ // If content inside parameter (reached via indirection)
+ // escapes back to results, mark as such.
+ if em&EscContentEscapes != 0 {
+ escassign(e, &e.funcParam, src)
+ }
+
+ em0 = em
+ for em >>= EscReturnBits; em != 0 && dsts != nil; em, dsts = em>>1, dsts.Next {
+ if em&1 != 0 {
+ escassign(e, dsts.N, src)
+ }
+ }
+
+ if em != 0 && dsts == nil {
+ Fatal("corrupt esc tag %v or messed up escretval list\n", Zconv(note, 0))
+ }
+ return em0
+}
+
+// This is messier than one would like, pulled out of esc's big
+// switch for clarity. We either have the paramnodes, which may be
+// connected to other things through flows, or we have the parameter
+// type nodes, which may be marked "noescape". Navigating the AST is
+// slightly different for methods vs. plain functions and for
+// imported vs. this-package functions.
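+//
+// For example, in f(g()) the lone argument is itself a call: ll below
+// is replaced by g's Escretval list, so each result of g flows into
+// the corresponding parameter of f.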
+func esccall(e *EscState, n *Node, up *Node) {
+ var ll *NodeList
+ var lr *NodeList
+ var a *Node
+ var fn *Node
+ var src *Node
+ var t *Type
+ var fntype *Type
+ var buf string
+ var i int
+
+ fn = nil
+ switch n.Op {
+ default:
+ Fatal("esccall")
+ fallthrough
+
+ case OCALLFUNC:
+ fn = n.Left
+ fntype = fn.Type
+
+ case OCALLMETH:
+ fn = n.Left.Right.Sym.Def
+ if fn != nil {
+ fntype = fn.Type
+ } else {
+ fntype = n.Left.Type
+ }
+
+ case OCALLINTER:
+ fntype = n.Left.Type
+ }
+
+ ll = n.List
+ if n.List != nil && n.List.Next == nil {
+ a = n.List.N
+ if a.Type.Etype == TSTRUCT && a.Type.Funarg != 0 { // f(g()).
+ ll = a.Escretval
+ }
+ }
+
+ if fn != nil && fn.Op == ONAME && fn.Class == PFUNC && fn.Defn != nil && fn.Defn.Nbody != nil && fn.Ntype != nil && fn.Defn.Esc < EscFuncTagged {
+ // function in same mutually recursive group. Incorporate into flow graph.
+ // print("esc local fn: %N\n", fn->ntype);
+ if fn.Defn.Esc == EscFuncUnknown || n.Escretval != nil {
+ Fatal("graph inconsistency")
+ }
+
+ // set up out list on this call node
+ for lr = fn.Ntype.Rlist; lr != nil; lr = lr.Next {
+ n.Escretval = list(n.Escretval, lr.N.Left) // type.rlist -> dclfield -> ONAME (PPARAMOUT)
+ }
+
+ // Receiver.
+ if n.Op != OCALLFUNC {
+ escassign(e, fn.Ntype.Left.Left, n.Left.Left)
+ }
+
+ for lr = fn.Ntype.List; ll != nil && lr != nil; ll, lr = ll.Next, lr.Next {
+ src = ll.N
+ if lr.N.Isddd != 0 && n.Isddd == 0 {
+ // Introduce ODDDARG node to represent ... allocation.
+ src = Nod(ODDDARG, nil, nil)
+
+ src.Type = typ(TARRAY)
+ src.Type.Type = lr.N.Type.Type
+ src.Type.Bound = int64(count(ll))
+ src.Type = Ptrto(src.Type) // make pointer so it will be tracked
+ src.Escloopdepth = e.loopdepth
+ src.Lineno = n.Lineno
+ src.Esc = EscNone // until we find otherwise
+ e.noesc = list(e.noesc, src)
+ n.Right = src
+ }
+
+ if lr.N.Left != nil {
+ escassign(e, lr.N.Left, src)
+ }
+ if src != ll.N {
+ break
+ }
+ }
+
+ // "..." arguments are untracked
+ for ; ll != nil; ll = ll.Next {
+ escassign(e, &e.theSink, ll.N)
+ }
+
+ return
+ }
+
+ // Imported or completely analyzed function. Use the escape tags.
+ if n.Escretval != nil {
+ Fatal("esc already decorated call %v\n", Nconv(n, obj.FmtSign))
+ }
+
+ // set up out list on this call node with dummy auto ONAMES in the current (calling) function.
+ i = 0
+
+ for t = getoutargx(fntype).Type; t != nil; t = t.Down {
+ src = Nod(ONAME, nil, nil)
+ buf = fmt.Sprintf(".dum%d", i)
+ i++
+ src.Sym = Lookup(buf)
+ src.Type = t.Type
+ src.Class = PAUTO
+ src.Curfn = Curfn
+ src.Escloopdepth = e.loopdepth
+ src.Used = 1
+ src.Lineno = n.Lineno
+ n.Escretval = list(n.Escretval, src)
+ }
+
+ // print("esc analyzed fn: %#N (%+T) returning (%+H)\n", fn, fntype, n->escretval);
+
+ // Receiver.
+ if n.Op != OCALLFUNC {
+ t = getthisx(fntype).Type
+ src = n.Left.Left
+ if haspointers(t.Type) {
+ escassignfromtag(e, t.Note, n.Escretval, src)
+ }
+ }
+
+ for t = getinargx(fntype).Type; ll != nil; ll = ll.Next {
+ src = ll.N
+ if t.Isddd != 0 && n.Isddd == 0 {
+ // Introduce ODDDARG node to represent ... allocation.
+ src = Nod(ODDDARG, nil, nil)
+
+ src.Escloopdepth = e.loopdepth
+ src.Lineno = n.Lineno
+ src.Type = typ(TARRAY)
+ src.Type.Type = t.Type.Type
+ src.Type.Bound = int64(count(ll))
+ src.Type = Ptrto(src.Type) // make pointer so it will be tracked
+ src.Esc = EscNone // until we find otherwise
+ e.noesc = list(e.noesc, src)
+ n.Right = src
+ }
+
+ if haspointers(t.Type) {
+ if escassignfromtag(e, t.Note, n.Escretval, src) == EscNone && up.Op != ODEFER && up.Op != OPROC {
+ a = src
+ for a.Op == OCONVNOP {
+ a = a.Left
+ }
+ switch a.Op {
+ // The callee has already been analyzed, so its arguments have esc tags.
+ // The argument is marked as not escaping at all.
+ // Record that fact so that any temporary used for
+ // synthesizing this expression can be reclaimed when
+ // the function returns.
+ // This 'noescape' is even stronger than the usual esc == EscNone.
+ // src->esc == EscNone means that src does not escape the current function.
+ // src->noescape = 1 here means that src does not escape this statement
+ // in the current function.
+ case OCALLPART,
+ OCLOSURE,
+ ODDDARG,
+ OARRAYLIT,
+ OPTRLIT,
+ OSTRUCTLIT:
+ a.Noescape = true
+ }
+ }
+ }
+
+ if src != ll.N {
+ break
+ }
+ t = t.Down
+ }
+
+ // "..." arguments are untracked
+ for ; ll != nil; ll = ll.Next {
+ escassign(e, &e.theSink, ll.N)
+ }
+}
+
+// Store the link src->dst in dst, throwing out some quick wins.
+func escflows(e *EscState, dst *Node, src *Node) {
+ if dst == nil || src == nil || dst == src {
+ return
+ }
+
+ // Don't bother building a graph for scalars.
+ if src.Type != nil && !haspointers(src.Type) {
+ return
+ }
+
+ if Debug['m'] > 2 {
+ fmt.Printf("%v::flows:: %v <- %v\n", Ctxt.Line(int(lineno)), Nconv(dst, obj.FmtShort), Nconv(src, obj.FmtShort))
+ }
+
+ if dst.Escflowsrc == nil {
+ e.dsts = list(e.dsts, dst)
+ e.dstcount++
+ }
+
+ e.edgecount++
+
+ dst.Escflowsrc = list(dst.Escflowsrc, src)
+}
+
+// Whenever we hit a reference node, the level goes up by one, and whenever
+// we hit an OADDR, the level goes down by one. As long as we're on a level > 0,
+// finding an OADDR just means we're following the upstream of a dereference,
+// so this address doesn't leak (yet).
+// If level == 0, it means the /value/ of this node can reach the root of this flood,
+// so if this node is an OADDR, its argument should be marked as escaping iff
+// its curfn/e->loopdepth differ from the flood's root.
+// Once an object has been moved to the heap, all of its upstream should be considered
+// escaping to the global scope.
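+//
+// For example (illustrative), given
+//
+//	p := &x
+//	sink = *p
+//
+// the walk from sink reaches the OIND at level 0 and follows p at
+// level 1; the OADDR in &x is then seen at level 1 > 0, the
+// address-of cancels the dereference, and x itself need not escape.
+// With sink = p instead, the OADDR is reached at level 0 and x is
+// moved to the heap.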
+func escflood(e *EscState, dst *Node) {
+ var l *NodeList
+
+ switch dst.Op {
+ case ONAME,
+ OCLOSURE:
+ break
+
+ default:
+ return
+ }
+
+ if Debug['m'] > 1 {
+ var tmp *Sym
+ if dst.Curfn != nil && dst.Curfn.Nname != nil {
+ tmp = dst.Curfn.Nname.Sym
+ } else {
+ tmp = nil
+ }
+ fmt.Printf("\nescflood:%d: dst %v scope:%v[%d]\n", walkgen, Nconv(dst, obj.FmtShort), Sconv(tmp, 0), dst.Escloopdepth)
+ }
+
+ for l = dst.Escflowsrc; l != nil; l = l.Next {
+ walkgen++
+ escwalk(e, 0, dst, l.N)
+ }
+}
+
+// There appear to be some loops in the escape graph, causing
+// arbitrary recursion into deeper and deeper levels.
+// Cut this off safely by making minLevel sticky: once you
+// get that deep, you cannot go down any further but you also
+// cannot go up any further. This is a conservative fix.
+// Making minLevel smaller (more negative) would handle more
+// complex chains of indirections followed by address-of operations,
+// at the cost of repeating the traversal once for each additional
+// allowed level when a loop is encountered. Using -2 suffices to
+// pass all the tests we have written so far, which we assume matches
+// the level of complexity we want the escape analysis code to handle.
+const (
+ MinLevel = -2
+)
+
+func escwalk(e *EscState, level int, dst *Node, src *Node) {
+ var ll *NodeList
+ var leaks int
+ var newlevel int
+
+ if src.Walkgen == walkgen && src.Esclevel <= int32(level) {
+ return
+ }
+ src.Walkgen = walkgen
+ src.Esclevel = int32(level)
+
+ if Debug['m'] > 1 {
+ var tmp *Sym
+ if src.Curfn != nil && src.Curfn.Nname != nil {
+ tmp = src.Curfn.Nname.Sym
+ } else {
+ tmp = nil
+ }
+ fmt.Printf("escwalk: level:%d depth:%d %.*s %v(%v) scope:%v[%d]\n", level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort), Sconv(tmp, 0), src.Escloopdepth)
+ }
+
+ e.pdepth++
+
+ // Input parameter flowing to output parameter?
+ if dst.Op == ONAME && dst.Class == PPARAMOUT && dst.Vargen <= 20 {
+ if src.Op == ONAME && src.Class == PPARAM && src.Curfn == dst.Curfn && src.Esc != EscScope && src.Esc != EscHeap {
+ if level == 0 {
+ if Debug['m'] != 0 {
+ Warnl(int(src.Lineno), "leaking param: %v to result %v", Nconv(src, obj.FmtShort), Sconv(dst.Sym, 0))
+ }
+ if src.Esc&EscMask != EscReturn {
+ src.Esc = EscReturn
+ }
+ src.Esc |= 1 << uint((dst.Vargen-1)+EscReturnBits)
+ goto recurse
+ } else if level > 0 {
+ if Debug['m'] != 0 {
+ Warnl(int(src.Lineno), "%v leaking param %v content to result %v", Nconv(src.Curfn.Nname, 0), Nconv(src, obj.FmtShort), Sconv(dst.Sym, 0))
+ }
+ if src.Esc&EscMask != EscReturn {
+ src.Esc = EscReturn
+ }
+ src.Esc |= EscContentEscapes
+ goto recurse
+ }
+ }
+ }
+
+ // The second clause is for values pointed at by an object passed to a call
+ // that returns something reached via indirect from the object.
+ // We don't know which result it is or how many indirects, so we treat it as leaking.
+ leaks = bool2int(level <= 0 && dst.Escloopdepth < src.Escloopdepth || level < 0 && dst == &e.funcParam && haspointers(src.Type))
+
+ switch src.Op {
+ case ONAME:
+ if src.Class == PPARAM && (leaks != 0 || dst.Escloopdepth < 0) && src.Esc != EscHeap {
+ src.Esc = EscScope
+ if Debug['m'] != 0 {
+ Warnl(int(src.Lineno), "leaking param: %v", Nconv(src, obj.FmtShort))
+ }
+ }
+
+ // Treat a PPARAMREF closure variable as equivalent to the
+ // original variable.
+ if src.Class == PPARAMREF {
+ if leaks != 0 && Debug['m'] != 0 {
+ Warnl(int(src.Lineno), "leaking closure reference %v", Nconv(src, obj.FmtShort))
+ }
+ escwalk(e, level, dst, src.Closure)
+ }
+
+ case OPTRLIT,
+ OADDR:
+ if leaks != 0 {
+ src.Esc = EscHeap
+ addrescapes(src.Left)
+ if Debug['m'] != 0 {
+ Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
+ }
+ }
+
+ newlevel = level
+ if level > MinLevel {
+ newlevel--
+ }
+ escwalk(e, newlevel, dst, src.Left)
+
+ case OARRAYLIT:
+ if Isfixedarray(src.Type) != 0 {
+ break
+ }
+ fallthrough
+
+ case ODDDARG,
+ OMAKECHAN,
+ OMAKEMAP,
+ OMAKESLICE,
+ OARRAYRUNESTR,
+ OARRAYBYTESTR,
+ OSTRARRAYRUNE,
+ OSTRARRAYBYTE,
+ OADDSTR,
+ OMAPLIT,
+ ONEW,
+ OCLOSURE,
+ OCALLPART,
+ ORUNESTR:
+ if leaks != 0 {
+ src.Esc = EscHeap
+ if Debug['m'] != 0 {
+ Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
+ }
+ }
+
+ case ODOT,
+ OSLICE,
+ OSLICEARR,
+ OSLICE3,
+ OSLICE3ARR,
+ OSLICESTR:
+ escwalk(e, level, dst, src.Left)
+
+ case OINDEX:
+ if Isfixedarray(src.Left.Type) != 0 {
+ escwalk(e, level, dst, src.Left)
+ break
+ }
+ fallthrough
+
+ case ODOTPTR,
+ OINDEXMAP,
+ OIND:
+ newlevel = level
+
+ if level > MinLevel {
+ newlevel++
+ }
+ escwalk(e, newlevel, dst, src.Left)
+ }
+
+recurse:
+ for ll = src.Escflowsrc; ll != nil; ll = ll.Next {
+ escwalk(e, level, dst, ll.N)
+ }
+
+ e.pdepth--
+}
+
+func esctag(e *EscState, func_ *Node) {
+ var savefn *Node
+ var ll *NodeList
+ var t *Type
+
+ func_.Esc = EscFuncTagged
+
+ // External functions are assumed unsafe,
+ // unless //go:noescape is given before the declaration.
+ if func_.Nbody == nil {
+ if func_.Noescape {
+ for t = getinargx(func_.Type).Type; t != nil; t = t.Down {
+ if haspointers(t.Type) {
+ t.Note = mktag(EscNone)
+ }
+ }
+ }
+
+ return
+ }
+
+ savefn = Curfn
+ Curfn = func_
+
+ for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ if ll.N.Op != ONAME || ll.N.Class != PPARAM {
+ continue
+ }
+
+ switch ll.N.Esc & EscMask {
+ case EscNone, // not touched by escflood
+ EscReturn:
+ if haspointers(ll.N.Type) { // don't bother tagging for scalars
+ ll.N.Paramfld.Note = mktag(int(ll.N.Esc))
+ }
+
+ case EscHeap, // touched by escflood, moved to heap
+ EscScope: // touched by escflood, value leaves scope
+ break
+ }
+ }
+
+ Curfn = savefn
+}
diff --git a/src/cmd/internal/gc/export.go b/src/cmd/internal/gc/export.go
new file mode 100644
index 0000000000..5b34fe2563
--- /dev/null
+++ b/src/cmd/internal/gc/export.go
@@ -0,0 +1,596 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "sort"
+ "unicode"
+ "unicode/utf8"
+)
+
+var asmlist *NodeList
+
+// Mark n's symbol as exported
+func exportsym(n *Node) {
+ if n == nil || n.Sym == nil {
+ return
+ }
+ if n.Sym.Flags&(SymExport|SymPackage) != 0 {
+ if n.Sym.Flags&SymPackage != 0 {
+ Yyerror("export/package mismatch: %v", Sconv(n.Sym, 0))
+ }
+ return
+ }
+
+ n.Sym.Flags |= SymExport
+
+ if Debug['E'] != 0 {
+ fmt.Printf("export symbol %v\n", Sconv(n.Sym, 0))
+ }
+ exportlist = list(exportlist, n)
+}
+
+func exportname(s string) bool {
+ if s[0] < utf8.RuneSelf {
+ return 'A' <= s[0] && s[0] <= 'Z'
+ }
+ r, _ := utf8.DecodeRuneInString(s)
+ return unicode.IsUpper(r)
+}
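+
+// For example, exportname("Foo") and exportname("Über") are true,
+// while exportname("foo") and exportname("_Foo") are false.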
+
+func initname(s string) int {
+ return bool2int(s == "init")
+}
+
+// exportedsym reports whether a symbol will be visible
+// to files that import our package.
+func exportedsym(sym *Sym) int {
+ // Builtins are visible everywhere.
+ if sym.Pkg == builtinpkg || sym.Origpkg == builtinpkg {
+ return 1
+ }
+
+ return bool2int(sym.Pkg == localpkg && exportname(sym.Name))
+}
+
+func autoexport(n *Node, ctxt int) {
+ if n == nil || n.Sym == nil {
+ return
+ }
+ if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
+ return
+ }
+ if n.Ntype != nil && n.Ntype.Op == OTFUNC && n.Ntype.Left != nil { // method
+ return
+ }
+
+ // -A is for cmd/gc/mkbuiltin script, so export everything
+ if Debug['A'] != 0 || exportname(n.Sym.Name) || initname(n.Sym.Name) != 0 {
+ exportsym(n)
+ }
+ if asmhdr != "" && n.Sym.Pkg == localpkg && !(n.Sym.Flags&SymAsm != 0) {
+ n.Sym.Flags |= SymAsm
+ asmlist = list(asmlist, n)
+ }
+}
+
+func dumppkg(p *Pkg) {
+ var suffix string
+
+ if p == nil || p == localpkg || p.Exported != 0 || p == builtinpkg {
+ return
+ }
+ p.Exported = 1
+ suffix = ""
+ if p.Direct == 0 {
+ suffix = " // indirect"
+ }
+ fmt.Fprintf(bout, "\timport %s \"%v\"%s\n", p.Name, Zconv(p.Path, 0), suffix)
+}
+
+// Look for anything we need for the inline body
+func reexportdeplist(ll *NodeList) {
+ for ; ll != nil; ll = ll.Next {
+ reexportdep(ll.N)
+ }
+}
+
+func reexportdep(n *Node) {
+ var t *Type
+
+ if n == nil {
+ return
+ }
+
+ //print("reexportdep %+hN\n", n);
+ switch n.Op {
+ case ONAME:
+ switch n.Class &^ PHEAP {
+ // methods will be printed along with their type
+ // nodes for T.Method expressions
+ case PFUNC:
+ if n.Left != nil && n.Left.Op == OTYPE {
+ break
+ }
+
+ // nodes for method calls.
+ if n.Type == nil || n.Type.Thistuple > 0 {
+ break
+ }
+ fallthrough
+
+ case PEXTERN:
+ if n.Sym != nil && exportedsym(n.Sym) == 0 {
+ if Debug['E'] != 0 {
+ fmt.Printf("reexport name %v\n", Sconv(n.Sym, 0))
+ }
+ exportlist = list(exportlist, n)
+ }
+ }
+
+ // Local variables in the bodies need their type.
+ case ODCL:
+ t = n.Left.Type
+
+ if t != Types[t.Etype] && t != idealbool && t != idealstring {
+ if Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+ if t != nil && t.Sym != nil && t.Sym.Def != nil && exportedsym(t.Sym) == 0 {
+ if Debug['E'] != 0 {
+ fmt.Printf("reexport type %v from declaration\n", Sconv(t.Sym, 0))
+ }
+ exportlist = list(exportlist, t.Sym.Def)
+ }
+ }
+
+ case OLITERAL:
+ t = n.Type
+ if t != Types[n.Type.Etype] && t != idealbool && t != idealstring {
+ if Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+ if t != nil && t.Sym != nil && t.Sym.Def != nil && exportedsym(t.Sym) == 0 {
+ if Debug['E'] != 0 {
+ fmt.Printf("reexport literal type %v\n", Sconv(t.Sym, 0))
+ }
+ exportlist = list(exportlist, t.Sym.Def)
+ }
+ }
+ fallthrough
+
+ case OTYPE:
+ if n.Sym != nil && exportedsym(n.Sym) == 0 {
+ if Debug['E'] != 0 {
+ fmt.Printf("reexport literal/type %v\n", Sconv(n.Sym, 0))
+ }
+ exportlist = list(exportlist, n)
+ }
+
+ // for operations that need a type when rendered, put the type on the export list.
+ case OCONV,
+ OCONVIFACE,
+ OCONVNOP,
+ ORUNESTR,
+ OARRAYBYTESTR,
+ OARRAYRUNESTR,
+ OSTRARRAYBYTE,
+ OSTRARRAYRUNE,
+ ODOTTYPE,
+ ODOTTYPE2,
+ OSTRUCTLIT,
+ OARRAYLIT,
+ OPTRLIT,
+ OMAKEMAP,
+ OMAKESLICE,
+ OMAKECHAN:
+ t = n.Type
+
+ if t.Sym == nil && t.Type != nil {
+ t = t.Type
+ }
+ if t != nil && t.Sym != nil && t.Sym.Def != nil && exportedsym(t.Sym) == 0 {
+ if Debug['E'] != 0 {
+ fmt.Printf("reexport type for expression %v\n", Sconv(t.Sym, 0))
+ }
+ exportlist = list(exportlist, t.Sym.Def)
+ }
+ }
+
+ reexportdep(n.Left)
+ reexportdep(n.Right)
+ reexportdeplist(n.List)
+ reexportdeplist(n.Rlist)
+ reexportdeplist(n.Ninit)
+ reexportdep(n.Ntest)
+ reexportdep(n.Nincr)
+ reexportdeplist(n.Nbody)
+ reexportdeplist(n.Nelse)
+}
+
+func dumpexportconst(s *Sym) {
+ var n *Node
+ var t *Type
+
+ n = s.Def
+ typecheck(&n, Erv)
+ if n == nil || n.Op != OLITERAL {
+ Fatal("dumpexportconst: oconst nil: %v", Sconv(s, 0))
+ }
+
+ t = n.Type // may or may not be specified
+ dumpexporttype(t)
+
+ if t != nil && isideal(t) == 0 {
+ fmt.Fprintf(bout, "\tconst %v %v = %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
+ } else {
+ fmt.Fprintf(bout, "\tconst %v = %v\n", Sconv(s, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
+ }
+}
+
+func dumpexportvar(s *Sym) {
+ var n *Node
+ var t *Type
+
+ n = s.Def
+ typecheck(&n, Erv|Ecall)
+ if n == nil || n.Type == nil {
+ Yyerror("variable exported but not defined: %v", Sconv(s, 0))
+ return
+ }
+
+ t = n.Type
+ dumpexporttype(t)
+
+ if t.Etype == TFUNC && n.Class == PFUNC {
+ if n.Inl != nil {
+ // When lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
+ // Currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package.
+ if Debug['l'] < 2 {
+ typecheckinl(n)
+ }
+
+ // NOTE: The space after %#S here is necessary for ld's export data parser.
+ fmt.Fprintf(bout, "\tfunc %v %v { %v }\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp), Hconv(n.Inl, obj.FmtSharp))
+
+ reexportdeplist(n.Inl)
+ } else {
+ fmt.Fprintf(bout, "\tfunc %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp))
+ }
+ } else {
+ fmt.Fprintf(bout, "\tvar %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp))
+ }
+}
+
+type methodbyname []*Type
+
+func (x methodbyname) Len() int {
+ return len(x)
+}
+
+func (x methodbyname) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+func (x methodbyname) Less(i, j int) bool {
+ var a *Type
+ var b *Type
+
+ a = x[i]
+ b = x[j]
+ return stringsCompare(a.Sym.Name, b.Sym.Name) < 0
+}
+
+func dumpexporttype(t *Type) {
+ var f *Type
+ var m []*Type
+ var i int
+ var n int
+
+ if t == nil {
+ return
+ }
+ if t.Printed != 0 || t == Types[t.Etype] || t == bytetype || t == runetype || t == errortype {
+ return
+ }
+ t.Printed = 1
+
+ if t.Sym != nil && t.Etype != TFIELD {
+ dumppkg(t.Sym.Pkg)
+ }
+
+ dumpexporttype(t.Type)
+ dumpexporttype(t.Down)
+
+ if t.Sym == nil || t.Etype == TFIELD {
+ return
+ }
+
+ n = 0
+ for f = t.Method; f != nil; f = f.Down {
+ dumpexporttype(f)
+ n++
+ }
+
+ m = make([]*Type, n)
+ i = 0
+ for f = t.Method; f != nil; f = f.Down {
+ m[i] = f
+ i++
+ }
+ sort.Sort(methodbyname(m[:n]))
+
+ fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
+ for i = 0; i < n; i++ {
+ f = m[i]
+ if f.Nointerface != 0 {
+ fmt.Fprintf(bout, "\t//go:nointerface\n")
+ }
+ if f.Type.Nname != nil && f.Type.Nname.Inl != nil { // nname was set by caninl
+
+ // when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
+ // currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package
+ if Debug['l'] < 2 {
+ typecheckinl(f.Type.Nname)
+ }
+ fmt.Fprintf(bout, "\tfunc (%v) %v %v { %v }\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp), Hconv(f.Type.Nname.Inl, obj.FmtSharp))
+ reexportdeplist(f.Type.Nname.Inl)
+ } else {
+ fmt.Fprintf(bout, "\tfunc (%v) %v %v\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp))
+ }
+ }
+}
+
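+// dumpsym writes the export data for the symbol s, dispatching on
+// whether its definition is a constant, a type, or a name.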
+func dumpsym(s *Sym) {
+ if s.Flags&SymExported != 0 {
+ return
+ }
+ s.Flags |= SymExported
+
+ if s.Def == nil {
+ Yyerror("unknown export symbol: %v", Sconv(s, 0))
+ return
+ }
+
+ // print("dumpsym %O %+S\n", s->def->op, s);
+ dumppkg(s.Pkg)
+
+ switch s.Def.Op {
+ default:
+ Yyerror("unexpected export symbol: %v %v", Oconv(int(s.Def.Op), 0), Sconv(s, 0))
+
+ case OLITERAL:
+ dumpexportconst(s)
+
+ case OTYPE:
+ if s.Def.Type.Etype == TFORW {
+ Yyerror("export of incomplete type %v", Sconv(s, 0))
+ } else {
+ dumpexporttype(s.Def.Type)
+ }
+
+ case ONAME:
+ dumpexportvar(s)
+ }
+}
+
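+// dumpexport writes the $$-delimited export section: the package
+// clause, directly imported packages, then every symbol on exportlist.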
+func dumpexport() {
+ var l *NodeList
+ var i int32
+ var lno int32
+ var p *Pkg
+
+ lno = lineno
+
+ fmt.Fprintf(bout, "\n$$\npackage %s", localpkg.Name)
+ if safemode != 0 {
+ fmt.Fprintf(bout, " safe")
+ }
+ fmt.Fprintf(bout, "\n")
+
+ for i = 0; i < int32(len(phash)); i++ {
+ for p = phash[i]; p != nil; p = p.Link {
+ if p.Direct != 0 {
+ dumppkg(p)
+ }
+ }
+ }
+
+ for l = exportlist; l != nil; l = l.Next {
+ lineno = l.N.Lineno
+ dumpsym(l.N.Sym)
+ }
+
+ fmt.Fprintf(bout, "\n$$\n")
+ lineno = lno
+}
+
+/*
+ * import
+ */
+
+/*
+ * return the sym for s, which should match lexical
+ */
+func importsym(s *Sym, op int) *Sym {
+ var pkgstr string
+
+ if s.Def != nil && int(s.Def.Op) != op {
+ pkgstr = fmt.Sprintf("during import \"%v\"", Zconv(importpkg.Path, 0))
+ redeclare(s, pkgstr)
+ }
+
+ // mark the symbol so it is not reexported
+ if s.Def == nil {
+ if exportname(s.Name) || initname(s.Name) != 0 {
+ s.Flags |= SymExport
+ } else {
+ s.Flags |= SymPackage // package scope
+ }
+ }
+
+ return s
+}
+
+/*
+ * return the type pkg.name, forward declaring if needed
+ */
+func pkgtype(s *Sym) *Type {
+ var t *Type
+
+ importsym(s, OTYPE)
+ if s.Def == nil || s.Def.Op != OTYPE {
+ t = typ(TFORW)
+ t.Sym = s
+ s.Def = typenod(t)
+ }
+
+ if s.Def.Type == nil {
+ Yyerror("pkgtype %v", Sconv(s, 0))
+ }
+ return s.Def.Type
+}
+
+func importimport(s *Sym, z *Strlit) {
+ // Informational: record package name
+ // associated with import path, for use in
+ // human-readable messages.
+ var p *Pkg
+
+ if isbadimport(z) {
+ errorexit()
+ }
+ p = mkpkg(z)
+ if p.Name == "" {
+ p.Name = s.Name
+ Pkglookup(s.Name, nil).Npkg++
+ } else if p.Name != s.Name {
+ Yyerror("conflicting names %s and %s for package \"%v\"", p.Name, s.Name, Zconv(p.Path, 0))
+ }
+
+ if !(incannedimport != 0) && myimportpath != "" && z.S == myimportpath {
+ Yyerror("import \"%v\": package depends on \"%v\" (import cycle)", Zconv(importpkg.Path, 0), Zconv(z, 0))
+ errorexit()
+ }
+}
+
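+// importconst declares the imported constant s with type t and value n.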
+func importconst(s *Sym, t *Type, n *Node) {
+ var n1 *Node
+
+ importsym(s, OLITERAL)
+ Convlit(&n, t)
+
+ if s.Def != nil { // TODO: check if already the same.
+ return
+ }
+
+ if n.Op != OLITERAL {
+ Yyerror("expression must be a constant")
+ return
+ }
+
+ if n.Sym != nil {
+ n1 = Nod(OXXX, nil, nil)
+ *n1 = *n
+ n = n1
+ }
+
+ n.Orig = newname(s)
+ n.Sym = s
+ declare(n, PEXTERN)
+
+ if Debug['E'] != 0 {
+ fmt.Printf("import const %v\n", Sconv(s, 0))
+ }
+}
+
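+// importvar declares the imported variable s with type t, diagnosing
+// any conflicting earlier definition.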
+func importvar(s *Sym, t *Type) {
+ var n *Node
+
+ importsym(s, ONAME)
+ if s.Def != nil && s.Def.Op == ONAME {
+ if Eqtype(t, s.Def.Type) {
+ return
+ }
+ Yyerror("inconsistent definition for var %v during import\n\t%v (in \"%v\")\n\t%v (in \"%v\")", Sconv(s, 0), Tconv(s.Def.Type, 0), Zconv(s.Importdef.Path, 0), Tconv(t, 0), Zconv(importpkg.Path, 0))
+ }
+
+ n = newname(s)
+ s.Importdef = importpkg
+ n.Type = t
+ declare(n, PEXTERN)
+
+ if Debug['E'] != 0 {
+ fmt.Printf("import var %v %v\n", Sconv(s, 0), Tconv(t, obj.FmtLong))
+ }
+}
+
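+// importtype completes the (possibly forward-declared) type pt with
+// the definition t.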
+func importtype(pt *Type, t *Type) {
+ var n *Node
+
+ // override declaration in unsafe.go for Pointer.
+ // there is no way in Go code to define unsafe.Pointer
+ // so we have to supply it.
+ if incannedimport != 0 && importpkg.Name == "unsafe" && pt.Nod.Sym.Name == "Pointer" {
+ t = Types[TUNSAFEPTR]
+ }
+
+ if pt.Etype == TFORW {
+ n = pt.Nod
+ copytype(pt.Nod, t)
+ pt.Nod = n // unzero nod
+ pt.Sym.Importdef = importpkg
+ pt.Sym.Lastlineno = int32(parserline())
+ declare(n, PEXTERN)
+ checkwidth(pt)
+ } else if !Eqtype(pt.Orig, t) {
+ Yyerror("inconsistent definition for type %v during import\n\t%v (in \"%v\")\n\t%v (in \"%v\")", Sconv(pt.Sym, 0), Tconv(pt, obj.FmtLong), Zconv(pt.Sym.Importdef.Path, 0), Tconv(t, obj.FmtLong), Zconv(importpkg.Path, 0))
+ }
+
+ if Debug['E'] != 0 {
+ fmt.Printf("import type %v %v\n", Tconv(pt, 0), Tconv(t, obj.FmtLong))
+ }
+}
+
+func dumpasmhdr() {
+ var b *obj.Biobuf
+ var l *NodeList
+ var n *Node
+ var t *Type
+
+ b, err := obj.Bopenw(asmhdr)
+ if err != nil {
+ Fatal("%v", err)
+ }
+ fmt.Fprintf(b, "// generated by %cg -asmhdr from package %s\n\n", Thearch.Thechar, localpkg.Name)
+ for l = asmlist; l != nil; l = l.Next {
+ n = l.N
+ if isblanksym(n.Sym) {
+ continue
+ }
+ switch n.Op {
+ case OLITERAL:
+ fmt.Fprintf(b, "#define const_%s %v\n", n.Sym.Name, Vconv(&n.Val, obj.FmtSharp))
+
+ case OTYPE:
+ t = n.Type
+ if t.Etype != TSTRUCT || t.Map != nil || t.Funarg != 0 {
+ break
+ }
+ fmt.Fprintf(b, "#define %s__size %d\n", t.Sym.Name, int(t.Width))
+ for t = t.Type; t != nil; t = t.Down {
+ if !isblanksym(t.Sym) {
+ fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, t.Sym.Name, int(t.Width))
+ }
+ }
+ }
+ }
+
+ obj.Bterm(b)
+}
diff --git a/src/cmd/internal/gc/fmt.go b/src/cmd/internal/gc/fmt.go
new file mode 100644
index 0000000000..08c08a4199
--- /dev/null
+++ b/src/cmd/internal/gc/fmt.go
@@ -0,0 +1,1953 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+//
+// Format conversions
+// %L int Line numbers
+//
+// %E int etype values (aka 'Kind')
+//
+// %O int Node Opcodes
+// Flags: "%#O": print go syntax. (automatic unless fmtmode == FDbg)
+//
+// %J Node* Node details
+// Flags: "%hJ" suppresses things not relevant until walk.
+//
+// %V Val* Constant values
+//
+// %S Sym* Symbols
+// Flags: +,- #: mode (see below)
+// "%hS" unqualified identifier in any mode
+// "%hhS" in export mode: unqualified identifier if exported, qualified if not
+//
+// %T Type* Types
+// Flags: +,- #: mode (see below)
+// 'l' definition instead of name.
+// 'h' omit "func" and receiver in function types
+//		'u' (only in -/Sym mode) print type identifiers with package name instead of prefix.
+//
+// %N Node* Nodes
+// Flags: +,- #: mode (see below)
+// 'h' (only in +/debug mode) suppress recursion
+// 'l' (only in Error mode) print "foo (type Bar)"
+//
+// %H NodeList* NodeLists
+// Flags: those of %N
+// ',' separate items with ',' instead of ';'
+//
+// %Z Strlit* String literals
+//
+// In mparith1.go:
+// %B Mpint* Big integers
+// %F Mpflt* Big floats
+//
+// %S, %T and %N use the following flags to set the format mode:
+const (
+ FErr = iota
+ FDbg
+ FExp
+ FTypeId
+)
+
+var fmtmode int = FErr
+
+var fmtpkgpfx int // %uT stickiness
+
+//
+// E.g. for %S: %+S %#S %-S print an identifier properly qualified for debug/export/internal mode.
+//
+// The mode flags +, - and # are sticky, meaning they persist through
+// recursions of %N, %T and %S, but not the h and l flags. The u flag is
+// sticky only on %T recursions and only used in %-/Sym mode.
+
+//
+// Useful format combinations:
+//
+// %+N %+H multiline recursive debug dump of node/nodelist
+// %+hN %+hH non-recursive debug dump
+//
+// %#N %#T export format
+// %#lT type definition instead of name
+// %#hT omit "func" and receiver in function signature
+//
+// %lN "foo (type Bar)" for error messages
+//
+// %-T type identifiers
+// %-hT type identifiers without "func" and arg names in type signatures (methodsym)
+// %-uT type identifiers with package name instead of prefix (typesym, dcommontype, typehash)
+//
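+// A minimal illustration (a sketch; output depends on the node at hand):
+//
+//	fmt.Printf("%v\n", Nconv(n, obj.FmtSign))  // %+N: multiline debug dump
+//	fmt.Printf("%v\n", Tconv(t, obj.FmtSharp)) // %#T: export format
+//	fmt.Printf("%v\n", Sconv(s, obj.FmtShort)) // %hS: unqualified name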
+
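+// setfmode installs the format mode implied by the +/#/- flags in
+// *flags, clears those bits, and returns the previous mode so the
+// caller can restore it.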
+func setfmode(flags *int) int {
+ var fm int
+
+ fm = fmtmode
+ if *flags&obj.FmtSign != 0 {
+ fmtmode = FDbg
+ } else if *flags&obj.FmtSharp != 0 {
+ fmtmode = FExp
+ } else if *flags&obj.FmtLeft != 0 {
+ fmtmode = FTypeId
+ }
+
+ *flags &^= (obj.FmtSharp | obj.FmtLeft | obj.FmtSign)
+ return fm
+}
+
+// Fmt "%L": Linenumbers
+
+var goopnames = []string{
+ OADDR: "&",
+ OADD: "+",
+ OADDSTR: "+",
+ OANDAND: "&&",
+ OANDNOT: "&^",
+ OAND: "&",
+ OAPPEND: "append",
+ OAS: "=",
+ OAS2: "=",
+ OBREAK: "break",
+ OCALL: "function call", // not actual syntax
+ OCAP: "cap",
+ OCASE: "case",
+ OCLOSE: "close",
+ OCOMPLEX: "complex",
+ OCOM: "^",
+ OCONTINUE: "continue",
+ OCOPY: "copy",
+ ODEC: "--",
+ ODELETE: "delete",
+ ODEFER: "defer",
+ ODIV: "/",
+ OEQ: "==",
+ OFALL: "fallthrough",
+ OFOR: "for",
+ OGE: ">=",
+ OGOTO: "goto",
+ OGT: ">",
+ OIF: "if",
+ OIMAG: "imag",
+ OINC: "++",
+ OIND: "*",
+ OLEN: "len",
+ OLE: "<=",
+ OLSH: "<<",
+ OLT: "<",
+ OMAKE: "make",
+ OMINUS: "-",
+ OMOD: "%",
+ OMUL: "*",
+ ONEW: "new",
+ ONE: "!=",
+ ONOT: "!",
+ OOROR: "||",
+ OOR: "|",
+ OPANIC: "panic",
+ OPLUS: "+",
+ OPRINTN: "println",
+ OPRINT: "print",
+ ORANGE: "range",
+ OREAL: "real",
+ ORECV: "<-",
+ ORECOVER: "recover",
+ ORETURN: "return",
+ ORSH: ">>",
+ OSELECT: "select",
+ OSEND: "<-",
+ OSUB: "-",
+ OSWITCH: "switch",
+ OXOR: "^",
+}
+
+// Fmt "%O": Node opcodes
+func Oconv(o int, flag int) string {
+ var fp string
+
+ if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode != FDbg {
+ if o >= 0 && o < len(goopnames) && goopnames[o] != "" {
+ fp += goopnames[o]
+ return fp
+ }
+ }
+
+ if o >= 0 && o < len(opnames) && opnames[o] != "" {
+ fp += opnames[o]
+ return fp
+ }
+
+ fp += fmt.Sprintf("O-%d", o)
+ return fp
+}
+
+var classnames = []string{
+ "Pxxx",
+ "PEXTERN",
+ "PAUTO",
+ "PPARAM",
+ "PPARAMOUT",
+ "PPARAMREF",
+ "PFUNC",
+}
+
+// Fmt "%J": Node details.
+func Jconv(n *Node, flag int) string {
+ var fp string
+
+ var s string
+ var c int
+
+ c = flag & obj.FmtShort
+
+ if !(c != 0) && n.Ullman != 0 {
+ fp += fmt.Sprintf(" u(%d)", n.Ullman)
+ }
+
+ if !(c != 0) && n.Addable != 0 {
+ fp += fmt.Sprintf(" a(%d)", n.Addable)
+ }
+
+ if !(c != 0) && n.Vargen != 0 {
+ fp += fmt.Sprintf(" g(%d)", n.Vargen)
+ }
+
+ if n.Lineno != 0 {
+ fp += fmt.Sprintf(" l(%d)", n.Lineno)
+ }
+
+ if !(c != 0) && n.Xoffset != BADWIDTH {
+ fp += fmt.Sprintf(" x(%d%+d)", n.Xoffset, n.Stkdelta)
+ }
+
+ if n.Class != 0 {
+ s = ""
+ if n.Class&PHEAP != 0 {
+ s = ",heap"
+ }
+ if int(n.Class&^PHEAP) < len(classnames) {
+ fp += fmt.Sprintf(" class(%s%s)", classnames[n.Class&^PHEAP], s)
+ } else {
+ fp += fmt.Sprintf(" class(%d?%s)", n.Class&^PHEAP, s)
+ }
+ }
+
+ if n.Colas != 0 {
+ fp += fmt.Sprintf(" colas(%d)", n.Colas)
+ }
+
+ if n.Funcdepth != 0 {
+ fp += fmt.Sprintf(" f(%d)", n.Funcdepth)
+ }
+
+ switch n.Esc {
+ case EscUnknown:
+ break
+
+ case EscHeap:
+ fp += fmt.Sprintf(" esc(h)")
+
+ case EscScope:
+ fp += fmt.Sprintf(" esc(s)")
+
+ case EscNone:
+ fp += fmt.Sprintf(" esc(no)")
+
+ case EscNever:
+ if !(c != 0) {
+ fp += fmt.Sprintf(" esc(N)")
+ }
+
+ default:
+ fp += fmt.Sprintf(" esc(%d)", n.Esc)
+ }
+
+ if n.Escloopdepth != 0 {
+ fp += fmt.Sprintf(" ld(%d)", n.Escloopdepth)
+ }
+
+ if !(c != 0) && n.Typecheck != 0 {
+ fp += fmt.Sprintf(" tc(%d)", n.Typecheck)
+ }
+
+ if !(c != 0) && n.Dodata != 0 {
+ fp += fmt.Sprintf(" dd(%d)", n.Dodata)
+ }
+
+ if n.Isddd != 0 {
+ fp += fmt.Sprintf(" isddd(%d)", n.Isddd)
+ }
+
+ if n.Implicit != 0 {
+ fp += fmt.Sprintf(" implicit(%d)", n.Implicit)
+ }
+
+ if n.Embedded != 0 {
+ fp += fmt.Sprintf(" embedded(%d)", n.Embedded)
+ }
+
+ if n.Addrtaken != 0 {
+ fp += fmt.Sprintf(" addrtaken")
+ }
+
+ if n.Assigned != 0 {
+ fp += fmt.Sprintf(" assigned")
+ }
+
+ if !(c != 0) && n.Used != 0 {
+ fp += fmt.Sprintf(" used(%d)", n.Used)
+ }
+ return fp
+}
+
+// Fmt "%V": Values
+func Vconv(v *Val, flag int) string {
+ var fp string
+
+ var x int64
+
+ switch v.Ctype {
+ case CTINT:
+ if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+ fp += fmt.Sprintf("%v", Bconv(v.U.Xval, obj.FmtSharp))
+ return fp
+ }
+ fp += fmt.Sprintf("%v", Bconv(v.U.Xval, 0))
+ return fp
+
+ case CTRUNE:
+ x = Mpgetfix(v.U.Xval)
+ if ' ' <= x && x < 0x80 && x != '\\' && x != '\'' {
+ fp += fmt.Sprintf("'%c'", int(x))
+ return fp
+ }
+ if 0 <= x && x < 1<<16 {
+ fp += fmt.Sprintf("'\\u%04x'", uint(int(x)))
+ return fp
+ }
+ if 0 <= x && x <= utf8.MaxRune {
+ fp += fmt.Sprintf("'\\U%08x'", uint64(x))
+ return fp
+ }
+ fp += fmt.Sprintf("('\\x00' + %v)", Bconv(v.U.Xval, 0))
+ return fp
+
+ case CTFLT:
+ if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+ fp += fmt.Sprintf("%v", Fconv(v.U.Fval, 0))
+ return fp
+ }
+ fp += fmt.Sprintf("%v", Fconv(v.U.Fval, obj.FmtSharp))
+ return fp
+
+ case CTCPLX:
+ if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+ fp += fmt.Sprintf("(%v+%vi)", Fconv(&v.U.Cval.Real, 0), Fconv(&v.U.Cval.Imag, 0))
+ return fp
+ }
+ if mpcmpfltc(&v.U.Cval.Real, 0) == 0 {
+ fp += fmt.Sprintf("%vi", Fconv(&v.U.Cval.Imag, obj.FmtSharp))
+ return fp
+ }
+ if mpcmpfltc(&v.U.Cval.Imag, 0) == 0 {
+ fp += fmt.Sprintf("%v", Fconv(&v.U.Cval.Real, obj.FmtSharp))
+ return fp
+ }
+ if mpcmpfltc(&v.U.Cval.Imag, 0) < 0 {
+ fp += fmt.Sprintf("(%v%vi)", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp))
+ return fp
+ }
+ fp += fmt.Sprintf("(%v+%vi)", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp))
+ return fp
+
+ case CTSTR:
+ fp += fmt.Sprintf("\"%v\"", Zconv(v.U.Sval, 0))
+ return fp
+
+ case CTBOOL:
+ if v.U.Bval != 0 {
+ fp += "true"
+ return fp
+ }
+ fp += "false"
+ return fp
+
+ case CTNIL:
+ fp += "nil"
+ return fp
+ }
+
+ fp += fmt.Sprintf("<ctype=%d>", v.Ctype)
+ return fp
+}
+
+// Fmt "%Z": escaped string literals
+func Zconv(sp *Strlit, flag int) string {
+ var fp string
+ var s string
+ var n int
+
+ if sp == nil {
+ fp += "<nil>"
+ return fp
+ }
+
+ // NOTE: Keep in sync with ../ld/go.c:/^Zconv.
+ s = sp.S
+ for i := 0; i < len(s); i += n {
+ var r rune
+ r, n = utf8.DecodeRuneInString(s[i:])
+ switch r {
+ case utf8.RuneError:
+ if n == 1 {
+ fp += fmt.Sprintf("\\x%02x", s[i])
+ break
+ }
+ fallthrough
+
+ // fall through
+ default:
+ if r < ' ' {
+ fp += fmt.Sprintf("\\x%02x", r)
+ break
+ }
+
+ fp += string(r)
+
+ case '\t':
+ fp += "\\t"
+
+ case '\n':
+ fp += "\\n"
+
+ case '"',
+ '\\':
+ fp += `\` + string(r)
+
+ case 0xFEFF: // BOM, basically disallowed in source code
+ fp += "\\uFEFF"
+ }
+ }
+
+ return fp
+}
+
+/*
+s%,%,\n%g
+s%\n+%\n%g
+s%^[ ]*T%%g
+s%,.*%%g
+s%.+% [T&] = "&",%g
+s%^ ........*\]%&~%g
+s%~ %%g
+*/
+var etnames = []string{
+ TINT: "INT",
+ TUINT: "UINT",
+ TINT8: "INT8",
+ TUINT8: "UINT8",
+ TINT16: "INT16",
+ TUINT16: "UINT16",
+ TINT32: "INT32",
+ TUINT32: "UINT32",
+ TINT64: "INT64",
+ TUINT64: "UINT64",
+ TUINTPTR: "UINTPTR",
+ TFLOAT32: "FLOAT32",
+ TFLOAT64: "FLOAT64",
+ TCOMPLEX64: "COMPLEX64",
+ TCOMPLEX128: "COMPLEX128",
+ TBOOL: "BOOL",
+ TPTR32: "PTR32",
+ TPTR64: "PTR64",
+ TFUNC: "FUNC",
+ TARRAY: "ARRAY",
+ TSTRUCT: "STRUCT",
+ TCHAN: "CHAN",
+ TMAP: "MAP",
+ TINTER: "INTER",
+ TFORW: "FORW",
+ TFIELD: "FIELD",
+ TSTRING: "STRING",
+ TANY: "ANY",
+}
+
+// Fmt "%E": etype
+func Econv(et int, flag int) string {
+ var fp string
+
+ if et >= 0 && et < len(etnames) && etnames[et] != "" {
+ fp += etnames[et]
+ return fp
+ }
+ fp += fmt.Sprintf("E-%d", et)
+ return fp
+}
+
+// Fmt "%S": syms
+func symfmt(s *Sym, flag int) string {
+ var fp string
+
+ var p string
+
+ if s.Pkg != nil && !(flag&obj.FmtShort != 0 /*untyped*/) {
+ switch fmtmode {
+ case FErr: // This is for the user
+ if s.Pkg == localpkg {
+ fp += s.Name
+ return fp
+ }
+
+			// If the name was used by multiple packages, display the full path.
+ if s.Pkg.Name != "" && Pkglookup(s.Pkg.Name, nil).Npkg > 1 {
+ fp += fmt.Sprintf("\"%v\".%s", Zconv(s.Pkg.Path, 0), s.Name)
+ return fp
+ }
+ fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
+ return fp
+
+ case FDbg:
+ fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
+ return fp
+
+ case FTypeId:
+ if flag&obj.FmtUnsigned != 0 /*untyped*/ {
+ fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
+ return fp // dcommontype, typehash
+ }
+ fp += fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name)
+ return fp // (methodsym), typesym, weaksym
+
+ case FExp:
+ if s.Name != "" && s.Name[0] == '.' {
+ Fatal("exporting synthetic symbol %s", s.Name)
+ }
+ if s.Pkg != builtinpkg {
+ fp += fmt.Sprintf("@\"%v\".%s", Zconv(s.Pkg.Path, 0), s.Name)
+ return fp
+ }
+ }
+ }
+
+ if flag&obj.FmtByte != 0 /*untyped*/ { // FmtByte (hh) implies FmtShort (h)
+
+ // skip leading "type." in method name
+ p = s.Name
+ if i := strings.LastIndex(s.Name, "."); i >= 0 {
+ p = s.Name[i+1:]
+ }
+
+ // exportname needs to see the name without the prefix too.
+ if (fmtmode == FExp && !exportname(p)) || fmtmode == FDbg {
+ fp += fmt.Sprintf("@\"%v\".%s", Zconv(s.Pkg.Path, 0), p)
+ return fp
+ }
+
+ fp += p
+ return fp
+ }
+
+ fp += s.Name
+ return fp
+}
+
+var basicnames = []string{
+ TINT: "int",
+ TUINT: "uint",
+ TINT8: "int8",
+ TUINT8: "uint8",
+ TINT16: "int16",
+ TUINT16: "uint16",
+ TINT32: "int32",
+ TUINT32: "uint32",
+ TINT64: "int64",
+ TUINT64: "uint64",
+ TUINTPTR: "uintptr",
+ TFLOAT32: "float32",
+ TFLOAT64: "float64",
+ TCOMPLEX64: "complex64",
+ TCOMPLEX128: "complex128",
+ TBOOL: "bool",
+ TANY: "any",
+ TSTRING: "string",
+ TNIL: "nil",
+ TIDEAL: "untyped number",
+ TBLANK: "blank",
+}
+
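+// typefmt returns the formatted form of t, honoring the current
+// fmtmode and the l/h/u flags described at the top of this file.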
+func typefmt(t *Type, flag int) string {
+ var fp string
+
+ var t1 *Type
+ var s *Sym
+
+ if t == nil {
+ fp += "<T>"
+ return fp
+ }
+
+ if t == bytetype || t == runetype {
+ // in %-T mode collapse rune and byte with their originals.
+ if fmtmode != FTypeId {
+ fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtShort))
+ return fp
+ }
+ t = Types[t.Etype]
+ }
+
+ if t == errortype {
+ fp += "error"
+ return fp
+ }
+
+ // Unless the 'l' flag was specified, if the type has a name, just print that name.
+ if !(flag&obj.FmtLong != 0 /*untyped*/) && t.Sym != nil && t.Etype != TFIELD && t != Types[t.Etype] {
+ switch fmtmode {
+ case FTypeId:
+ if flag&obj.FmtShort != 0 /*untyped*/ {
+ if t.Vargen != 0 {
+ fp += fmt.Sprintf("%v·%d", Sconv(t.Sym, obj.FmtShort), t.Vargen)
+ return fp
+ }
+ fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtShort))
+ return fp
+ }
+
+ if flag&obj.FmtUnsigned != 0 /*untyped*/ {
+ fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtUnsigned))
+ return fp
+ }
+ fallthrough
+
+ case FExp:
+ if t.Sym.Pkg == localpkg && t.Vargen != 0 {
+ fp += fmt.Sprintf("%v·%d", Sconv(t.Sym, 0), t.Vargen)
+ return fp
+ }
+ }
+
+ fp += fmt.Sprintf("%v", Sconv(t.Sym, 0))
+ return fp
+ }
+
+ if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
+ if fmtmode == FErr && (t == idealbool || t == idealstring) {
+ fp += "untyped "
+ }
+ fp += basicnames[t.Etype]
+ return fp
+ }
+
+ if fmtmode == FDbg {
+ fp += fmt.Sprintf("%v-", Econv(int(t.Etype), 0))
+ }
+
+ switch t.Etype {
+ case TPTR32,
+ TPTR64:
+ if fmtmode == FTypeId && (flag&obj.FmtShort != 0 /*untyped*/) {
+ fp += fmt.Sprintf("*%v", Tconv(t.Type, obj.FmtShort))
+ return fp
+ }
+ fp += fmt.Sprintf("*%v", Tconv(t.Type, 0))
+ return fp
+
+ case TARRAY:
+ if t.Bound >= 0 {
+ fp += fmt.Sprintf("[%d]%v", t.Bound, Tconv(t.Type, 0))
+ return fp
+ }
+ if t.Bound == -100 {
+ fp += fmt.Sprintf("[...]%v", Tconv(t.Type, 0))
+ return fp
+ }
+ fp += fmt.Sprintf("[]%v", Tconv(t.Type, 0))
+ return fp
+
+ case TCHAN:
+ switch t.Chan {
+ case Crecv:
+ fp += fmt.Sprintf("<-chan %v", Tconv(t.Type, 0))
+ return fp
+
+ case Csend:
+ fp += fmt.Sprintf("chan<- %v", Tconv(t.Type, 0))
+ return fp
+ }
+
+ if t.Type != nil && t.Type.Etype == TCHAN && t.Type.Sym == nil && t.Type.Chan == Crecv {
+ fp += fmt.Sprintf("chan (%v)", Tconv(t.Type, 0))
+ return fp
+ }
+ fp += fmt.Sprintf("chan %v", Tconv(t.Type, 0))
+ return fp
+
+ case TMAP:
+ fp += fmt.Sprintf("map[%v]%v", Tconv(t.Down, 0), Tconv(t.Type, 0))
+ return fp
+
+ case TINTER:
+ fp += "interface {"
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ if exportname(t1.Sym.Name) {
+ if t1.Down != nil {
+ fp += fmt.Sprintf(" %v%v;", Sconv(t1.Sym, obj.FmtShort), Tconv(t1.Type, obj.FmtShort))
+ } else {
+ fp += fmt.Sprintf(" %v%v ", Sconv(t1.Sym, obj.FmtShort), Tconv(t1.Type, obj.FmtShort))
+ }
+ } else {
+ // non-exported method names must be qualified
+ if t1.Down != nil {
+ fp += fmt.Sprintf(" %v%v;", Sconv(t1.Sym, obj.FmtUnsigned), Tconv(t1.Type, obj.FmtShort))
+ } else {
+ fp += fmt.Sprintf(" %v%v ", Sconv(t1.Sym, obj.FmtUnsigned), Tconv(t1.Type, obj.FmtShort))
+ }
+ }
+ }
+
+ fp += "}"
+ return fp
+
+ case TFUNC:
+ if flag&obj.FmtShort != 0 /*untyped*/ {
+ fp += fmt.Sprintf("%v", Tconv(getinargx(t), 0))
+ } else {
+ if t.Thistuple != 0 {
+ fp += fmt.Sprintf("method%v func%v", Tconv(getthisx(t), 0), Tconv(getinargx(t), 0))
+ } else {
+ fp += fmt.Sprintf("func%v", Tconv(getinargx(t), 0))
+ }
+ }
+
+ switch t.Outtuple {
+ case 0:
+ break
+
+ case 1:
+ if fmtmode != FExp {
+ fp += fmt.Sprintf(" %v", Tconv(getoutargx(t).Type.Type, 0)) // struct->field->field's type
+ break
+ }
+ fallthrough
+
+ default:
+ fp += fmt.Sprintf(" %v", Tconv(getoutargx(t), 0))
+ }
+
+ return fp
+
+ // Format the bucket struct for map[x]y as map.bucket[x]y.
+ // This avoids a recursive print that generates very long names.
+ case TSTRUCT:
+ if t.Map != nil {
+ if t.Map.Bucket == t {
+ fp += fmt.Sprintf("map.bucket[%v]%v", Tconv(t.Map.Down, 0), Tconv(t.Map.Type, 0))
+ return fp
+ }
+
+ if t.Map.Hmap == t {
+ fp += fmt.Sprintf("map.hdr[%v]%v", Tconv(t.Map.Down, 0), Tconv(t.Map.Type, 0))
+ return fp
+ }
+
+ if t.Map.Hiter == t {
+ fp += fmt.Sprintf("map.iter[%v]%v", Tconv(t.Map.Down, 0), Tconv(t.Map.Type, 0))
+ return fp
+ }
+
+ Yyerror("unknown internal map type")
+ }
+
+ if t.Funarg != 0 {
+ fp += "("
+ if fmtmode == FTypeId || fmtmode == FErr { // no argument names on function signature, and no "noescape"/"nosplit" tags
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ if t1.Down != nil {
+ fp += fmt.Sprintf("%v, ", Tconv(t1, obj.FmtShort))
+ } else {
+ fp += fmt.Sprintf("%v", Tconv(t1, obj.FmtShort))
+ }
+ }
+ } else {
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ if t1.Down != nil {
+ fp += fmt.Sprintf("%v, ", Tconv(t1, 0))
+ } else {
+ fp += fmt.Sprintf("%v", Tconv(t1, 0))
+ }
+ }
+ }
+
+ fp += ")"
+ } else {
+ fp += "struct {"
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ if t1.Down != nil {
+ fp += fmt.Sprintf(" %v;", Tconv(t1, obj.FmtLong))
+ } else {
+ fp += fmt.Sprintf(" %v ", Tconv(t1, obj.FmtLong))
+ }
+ }
+ fp += "}"
+ }
+
+ return fp
+
+ case TFIELD:
+ if !(flag&obj.FmtShort != 0 /*untyped*/) {
+ s = t.Sym
+
+			// Take the name from the original, in case we substituted it with ~r%d or ~b%d.
+ // ~r%d is a (formerly) unnamed result.
+ if (fmtmode == FErr || fmtmode == FExp) && t.Nname != nil {
+ if t.Nname.Orig != nil {
+ s = t.Nname.Orig.Sym
+ if s != nil && s.Name[0] == '~' {
+ if s.Name[1] == 'r' { // originally an unnamed result
+ s = nil
+ } else if s.Name[1] == 'b' { // originally the blank identifier _
+ s = Lookup("_")
+ }
+ }
+ } else {
+ s = nil
+ }
+ }
+
+ if s != nil && !(t.Embedded != 0) {
+ if t.Funarg != 0 {
+ fp += fmt.Sprintf("%v ", Nconv(t.Nname, 0))
+ } else if flag&obj.FmtLong != 0 /*untyped*/ {
+ fp += fmt.Sprintf("%v ", Sconv(s, obj.FmtShort|obj.FmtByte)) // qualify non-exported names (used on structs, not on funarg)
+ } else {
+ fp += fmt.Sprintf("%v ", Sconv(s, 0))
+ }
+ } else if fmtmode == FExp {
+ // TODO(rsc) this breaks on the eliding of unused arguments in the backend
+			// when this is fixed, the special case in dcl.go's checkarglist can go.
+ //if(t->funarg)
+ // fmtstrcpy(fp, "_ ");
+ //else
+ if t.Embedded != 0 && s.Pkg != nil && len(s.Pkg.Path.S) > 0 {
+ fp += fmt.Sprintf("@\"%v\".? ", Zconv(s.Pkg.Path, 0))
+ } else {
+ fp += "? "
+ }
+ }
+ }
+
+ if t.Isddd != 0 {
+ fp += fmt.Sprintf("...%v", Tconv(t.Type.Type, 0))
+ } else {
+ fp += fmt.Sprintf("%v", Tconv(t.Type, 0))
+ }
+
+ if !(flag&obj.FmtShort != 0 /*untyped*/) && t.Note != nil {
+ fp += fmt.Sprintf(" \"%v\"", Zconv(t.Note, 0))
+ }
+ return fp
+
+ case TFORW:
+ if t.Sym != nil {
+ fp += fmt.Sprintf("undefined %v", Sconv(t.Sym, 0))
+ return fp
+ }
+ fp += "undefined"
+ return fp
+
+ case TUNSAFEPTR:
+ if fmtmode == FExp {
+ fp += fmt.Sprintf("@\"unsafe\".Pointer")
+ return fp
+ }
+ fp += fmt.Sprintf("unsafe.Pointer")
+ return fp
+ }
+
+ if fmtmode == FExp {
+ Fatal("missing %v case during export", Econv(int(t.Etype), 0))
+ }
+
+ // Don't know how to handle - fall back to detailed prints.
+ fp += fmt.Sprintf("%v <%v> %v", Econv(int(t.Etype), 0), Sconv(t.Sym, 0), Tconv(t.Type, 0))
+ return fp
+}
+
+// Statements which may be rendered with a simplestmt as init.
+func stmtwithinit(op int) int {
+ switch op {
+ case OIF,
+ OFOR,
+ OSWITCH:
+ return 1
+ }
+
+ return 0
+}
+
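+// stmtfmt formats the statement node n. Init statements that do not
+// fit the statement's own syntax are printed separately, wrapped in an
+// extra block for if/for/switch to limit their scope.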
+func stmtfmt(n *Node) string {
+ var f string
+
+ var complexinit int
+ var simpleinit int
+ var extrablock int
+
+ // some statements allow for an init, but at most one,
+	// but we may have an arbitrary number added, e.g. by typecheck
+ // and inlining. If it doesn't fit the syntax, emit an enclosing
+ // block starting with the init statements.
+
+	// if we can just say "for" n.Ninit; ... then do so
+ simpleinit = bool2int(n.Ninit != nil && !(n.Ninit.Next != nil) && !(n.Ninit.N.Ninit != nil) && stmtwithinit(int(n.Op)) != 0)
+
+ // otherwise, print the inits as separate statements
+ complexinit = bool2int(n.Ninit != nil && !(simpleinit != 0) && (fmtmode != FErr))
+
+ // but if it was for if/for/switch, put in an extra surrounding block to limit the scope
+ extrablock = bool2int(complexinit != 0 && stmtwithinit(int(n.Op)) != 0)
+
+ if extrablock != 0 {
+ f += "{"
+ }
+
+ if complexinit != 0 {
+ f += fmt.Sprintf(" %v; ", Hconv(n.Ninit, 0))
+ }
+
+ switch n.Op {
+ case ODCL:
+ if fmtmode == FExp {
+ switch n.Left.Class &^ PHEAP {
+ case PPARAM,
+ PPARAMOUT,
+ PAUTO:
+ f += fmt.Sprintf("var %v %v", Nconv(n.Left, 0), Tconv(n.Left.Type, 0))
+ goto ret
+ }
+ }
+
+ f += fmt.Sprintf("var %v %v", Sconv(n.Left.Sym, 0), Tconv(n.Left.Type, 0))
+
+ case ODCLFIELD:
+ if n.Left != nil {
+ f += fmt.Sprintf("%v %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
+ } else {
+ f += fmt.Sprintf("%v", Nconv(n.Right, 0))
+ }
+
+	// Don't export "v = <N>" initializing statements; hope they're always
+	// preceded by the DCL, which will be re-parsed and typechecked to reproduce
+	// the "v = <N>" again.
+ case OAS:
+ if fmtmode == FExp && n.Right == nil {
+ break
+ }
+
+ if n.Colas != 0 && !(complexinit != 0) {
+ f += fmt.Sprintf("%v := %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
+ } else {
+ f += fmt.Sprintf("%v = %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
+ }
+
+ case OASOP:
+ if n.Implicit != 0 {
+ if n.Etype == OADD {
+ f += fmt.Sprintf("%v++", Nconv(n.Left, 0))
+ } else {
+ f += fmt.Sprintf("%v--", Nconv(n.Left, 0))
+ }
+ break
+ }
+
+ f += fmt.Sprintf("%v %v= %v", Nconv(n.Left, 0), Oconv(int(n.Etype), obj.FmtSharp), Nconv(n.Right, 0))
+
+ case OAS2:
+ if n.Colas != 0 && !(complexinit != 0) {
+ f += fmt.Sprintf("%v := %v", Hconv(n.List, obj.FmtComma), Hconv(n.Rlist, obj.FmtComma))
+ break
+ }
+ fallthrough
+
+ case OAS2DOTTYPE,
+ OAS2FUNC,
+ OAS2MAPR,
+ OAS2RECV:
+ f += fmt.Sprintf("%v = %v", Hconv(n.List, obj.FmtComma), Hconv(n.Rlist, obj.FmtComma))
+
+ case ORETURN:
+ f += fmt.Sprintf("return %v", Hconv(n.List, obj.FmtComma))
+
+ case ORETJMP:
+ f += fmt.Sprintf("retjmp %v", Sconv(n.Sym, 0))
+
+ case OPROC:
+ f += fmt.Sprintf("go %v", Nconv(n.Left, 0))
+
+ case ODEFER:
+ f += fmt.Sprintf("defer %v", Nconv(n.Left, 0))
+
+ case OIF:
+ if simpleinit != 0 {
+ f += fmt.Sprintf("if %v; %v { %v }", Nconv(n.Ninit.N, 0), Nconv(n.Ntest, 0), Hconv(n.Nbody, 0))
+ } else {
+ f += fmt.Sprintf("if %v { %v }", Nconv(n.Ntest, 0), Hconv(n.Nbody, 0))
+ }
+ if n.Nelse != nil {
+ f += fmt.Sprintf(" else { %v }", Hconv(n.Nelse, 0))
+ }
+
+ case OFOR:
+ if fmtmode == FErr { // TODO maybe only if FmtShort, same below
+ f += "for loop"
+ break
+ }
+
+ f += "for"
+ if simpleinit != 0 {
+ f += fmt.Sprintf(" %v;", Nconv(n.Ninit.N, 0))
+ } else if n.Nincr != nil {
+ f += " ;"
+ }
+
+ if n.Ntest != nil {
+ f += fmt.Sprintf(" %v", Nconv(n.Ntest, 0))
+ }
+
+ if n.Nincr != nil {
+ f += fmt.Sprintf("; %v", Nconv(n.Nincr, 0))
+ } else if simpleinit != 0 {
+ f += ";"
+ }
+
+ f += fmt.Sprintf(" { %v }", Hconv(n.Nbody, 0))
+
+ case ORANGE:
+ if fmtmode == FErr {
+ f += "for loop"
+ break
+ }
+
+ if n.List == nil {
+ f += fmt.Sprintf("for range %v { %v }", Nconv(n.Right, 0), Hconv(n.Nbody, 0))
+ break
+ }
+
+ f += fmt.Sprintf("for %v = range %v { %v }", Hconv(n.List, obj.FmtComma), Nconv(n.Right, 0), Hconv(n.Nbody, 0))
+
+ case OSELECT,
+ OSWITCH:
+ if fmtmode == FErr {
+ f += fmt.Sprintf("%v statement", Oconv(int(n.Op), 0))
+ break
+ }
+
+ f += fmt.Sprintf("%v", Oconv(int(n.Op), obj.FmtSharp))
+ if simpleinit != 0 {
+ f += fmt.Sprintf(" %v;", Nconv(n.Ninit.N, 0))
+ }
+ if n.Ntest != nil {
+ f += fmt.Sprintf("%v", Nconv(n.Ntest, 0))
+ }
+
+ f += fmt.Sprintf(" { %v }", Hconv(n.List, 0))
+
+ case OCASE,
+ OXCASE:
+ if n.List != nil {
+ f += fmt.Sprintf("case %v: %v", Hconv(n.List, obj.FmtComma), Hconv(n.Nbody, 0))
+ } else {
+ f += fmt.Sprintf("default: %v", Hconv(n.Nbody, 0))
+ }
+
+ case OBREAK,
+ OCONTINUE,
+ OGOTO,
+ OFALL,
+ OXFALL:
+ if n.Left != nil {
+ f += fmt.Sprintf("%v %v", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0))
+ } else {
+ f += fmt.Sprintf("%v", Oconv(int(n.Op), obj.FmtSharp))
+ }
+
+ case OEMPTY:
+ break
+
+ case OLABEL:
+ f += fmt.Sprintf("%v: ", Nconv(n.Left, 0))
+ }
+
+ret:
+ if extrablock != 0 {
+ f += "}"
+ }
+
+ return f
+}
+
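+// opprec maps each Op to its precedence, which exprfmt uses to decide
+// where parentheses are needed; statements are marked -1.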
+var opprec = []int{
+ OAPPEND: 8,
+ OARRAYBYTESTR: 8,
+ OARRAYLIT: 8,
+ OARRAYRUNESTR: 8,
+ OCALLFUNC: 8,
+ OCALLINTER: 8,
+ OCALLMETH: 8,
+ OCALL: 8,
+ OCAP: 8,
+ OCLOSE: 8,
+ OCONVIFACE: 8,
+ OCONVNOP: 8,
+ OCONV: 8,
+ OCOPY: 8,
+ ODELETE: 8,
+ OLEN: 8,
+ OLITERAL: 8,
+ OMAKESLICE: 8,
+ OMAKE: 8,
+ OMAPLIT: 8,
+ ONAME: 8,
+ ONEW: 8,
+ ONONAME: 8,
+ OPACK: 8,
+ OPANIC: 8,
+ OPAREN: 8,
+ OPRINTN: 8,
+ OPRINT: 8,
+ ORUNESTR: 8,
+ OSTRARRAYBYTE: 8,
+ OSTRARRAYRUNE: 8,
+ OSTRUCTLIT: 8,
+ OTARRAY: 8,
+ OTCHAN: 8,
+ OTFUNC: 8,
+ OTINTER: 8,
+ OTMAP: 8,
+ OTSTRUCT: 8,
+ OINDEXMAP: 8,
+ OINDEX: 8,
+ OSLICE: 8,
+ OSLICESTR: 8,
+ OSLICEARR: 8,
+ OSLICE3: 8,
+ OSLICE3ARR: 8,
+ ODOTINTER: 8,
+ ODOTMETH: 8,
+ ODOTPTR: 8,
+ ODOTTYPE2: 8,
+ ODOTTYPE: 8,
+ ODOT: 8,
+ OXDOT: 8,
+ OCALLPART: 8,
+ OPLUS: 7,
+ ONOT: 7,
+ OCOM: 7,
+ OMINUS: 7,
+ OADDR: 7,
+ OIND: 7,
+ ORECV: 7,
+ OMUL: 6,
+ ODIV: 6,
+ OMOD: 6,
+ OLSH: 6,
+ ORSH: 6,
+ OAND: 6,
+ OANDNOT: 6,
+ OADD: 5,
+ OSUB: 5,
+ OOR: 5,
+ OXOR: 5,
+ OEQ: 4,
+ OLT: 4,
+ OLE: 4,
+ OGE: 4,
+ OGT: 4,
+ ONE: 4,
+ OCMPSTR: 4,
+ OCMPIFACE: 4,
+ OSEND: 3,
+ OANDAND: 2,
+ OOROR: 1,
+	// Statements handled by stmtfmt
+	OAS: -1,
+ OAS2: -1,
+ OAS2DOTTYPE: -1,
+ OAS2FUNC: -1,
+ OAS2MAPR: -1,
+ OAS2RECV: -1,
+ OASOP: -1,
+ OBREAK: -1,
+ OCASE: -1,
+ OCONTINUE: -1,
+ ODCL: -1,
+ ODCLFIELD: -1,
+ ODEFER: -1,
+ OEMPTY: -1,
+ OFALL: -1,
+ OFOR: -1,
+ OGOTO: -1,
+ OIF: -1,
+ OLABEL: -1,
+ OPROC: -1,
+ ORANGE: -1,
+ ORETURN: -1,
+ OSELECT: -1,
+ OSWITCH: -1,
+ OXCASE: -1,
+ OXFALL: -1,
+ OEND: 0,
+}
+
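+// exprfmt formats n as an expression in a context of precedence prec,
+// parenthesizing whenever opprec[n.Op] is lower than prec.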
+func exprfmt(n *Node, prec int) string {
+ var f string
+
+ var nprec int
+ var ptrlit int
+ var l *NodeList
+
+ for n != nil && n.Implicit != 0 && (n.Op == OIND || n.Op == OADDR) {
+ n = n.Left
+ }
+
+ if n == nil {
+ f += "<N>"
+ return f
+ }
+
+ nprec = opprec[n.Op]
+ if n.Op == OTYPE && n.Sym != nil {
+ nprec = 8
+ }
+
+ if prec > nprec {
+ f += fmt.Sprintf("(%v)", Nconv(n, 0))
+ return f
+ }
+
+ switch n.Op {
+ case OPAREN:
+ f += fmt.Sprintf("(%v)", Nconv(n.Left, 0))
+ return f
+
+ case ODDDARG:
+ f += fmt.Sprintf("... argument")
+ return f
+
+ case OREGISTER:
+ f += fmt.Sprintf("%v", Ctxt.Rconv(int(n.Val.U.Reg)))
+ return f
+
+ case OLITERAL: // this is a bit of a mess
+ if fmtmode == FErr && n.Sym != nil {
+ f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
+ return f
+ }
+ if n.Val.Ctype == CTNIL && n.Orig != nil && n.Orig != n {
+ f += exprfmt(n.Orig, prec)
+ return f
+ }
+ if n.Type != nil && n.Type != Types[n.Type.Etype] && n.Type != idealbool && n.Type != idealstring {
+ // Need parens when type begins with what might
+ // be misinterpreted as a unary operator: * or <-.
+ if Isptr[n.Type.Etype] != 0 || (n.Type.Etype == TCHAN && n.Type.Chan == Crecv) {
+ f += fmt.Sprintf("(%v)(%v)", Tconv(n.Type, 0), Vconv(&n.Val, 0))
+ return f
+ } else {
+ f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Vconv(&n.Val, 0))
+ return f
+ }
+ }
+
+ f += fmt.Sprintf("%v", Vconv(&n.Val, 0))
+ return f
+
+ // Special case: name used as local variable in export.
+ // _ becomes ~b%d internally; print as _ for export
+ case ONAME:
+ if fmtmode == FExp && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
+ f += fmt.Sprintf("_")
+ return f
+ }
+ if fmtmode == FExp && n.Sym != nil && !isblank(n) && n.Vargen > 0 {
+ f += fmt.Sprintf("%v·%d", Sconv(n.Sym, 0), n.Vargen)
+ return f
+ }
+
+ // Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
+ // but for export, this should be rendered as (*pkg.T).meth.
+ // These nodes have the special property that they are names with a left OTYPE and a right ONAME.
+ if fmtmode == FExp && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME {
+ if Isptr[n.Left.Type.Etype] != 0 {
+ f += fmt.Sprintf("(%v).%v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
+ return f
+ } else {
+ f += fmt.Sprintf("%v.%v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
+ return f
+ }
+ }
+ fallthrough
+
+ case OPACK,
+ ONONAME:
+ f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
+ return f
+
+ case OTYPE:
+ if n.Type == nil && n.Sym != nil {
+ f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
+ return f
+ }
+ f += fmt.Sprintf("%v", Tconv(n.Type, 0))
+ return f
+
+ case OTARRAY:
+ if n.Left != nil {
+ f += fmt.Sprintf("[]%v", Nconv(n.Left, 0))
+ return f
+ }
+ f += fmt.Sprintf("[]%v", Nconv(n.Right, 0))
+ return f // happens before typecheck
+
+ case OTMAP:
+ f += fmt.Sprintf("map[%v]%v", Nconv(n.Left, 0), Nconv(n.Right, 0))
+ return f
+
+ case OTCHAN:
+ switch n.Etype {
+ case Crecv:
+ f += fmt.Sprintf("<-chan %v", Nconv(n.Left, 0))
+ return f
+
+ case Csend:
+ f += fmt.Sprintf("chan<- %v", Nconv(n.Left, 0))
+ return f
+
+ default:
+ if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.Etype == Crecv {
+ f += fmt.Sprintf("chan (%v)", Nconv(n.Left, 0))
+ return f
+ } else {
+ f += fmt.Sprintf("chan %v", Nconv(n.Left, 0))
+ return f
+ }
+ }
+ fallthrough
+
+ case OTSTRUCT:
+ f += fmt.Sprintf("<struct>")
+ return f
+
+ case OTINTER:
+ f += fmt.Sprintf("<inter>")
+ return f
+
+ case OTFUNC:
+ f += fmt.Sprintf("<func>")
+ return f
+
+ case OCLOSURE:
+ if fmtmode == FErr {
+ f += "func literal"
+ return f
+ }
+ if n.Nbody != nil {
+ f += fmt.Sprintf("%v { %v }", Tconv(n.Type, 0), Hconv(n.Nbody, 0))
+ return f
+ }
+ f += fmt.Sprintf("%v { %v }", Tconv(n.Type, 0), Hconv(n.Closure.Nbody, 0))
+ return f
+
+ case OCOMPLIT:
+ ptrlit = bool2int(n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0)
+ if fmtmode == FErr {
+ if n.Right != nil && n.Right.Type != nil && !(n.Implicit != 0) {
+ if ptrlit != 0 {
+ f += fmt.Sprintf("&%v literal", Tconv(n.Right.Type.Type, 0))
+ return f
+ } else {
+ f += fmt.Sprintf("%v literal", Tconv(n.Right.Type, 0))
+ return f
+ }
+ }
+
+ f += "composite literal"
+ return f
+ }
+
+ if fmtmode == FExp && ptrlit != 0 {
+ // typecheck has overwritten OIND by OTYPE with pointer type.
+ f += fmt.Sprintf("(&%v{ %v })", Tconv(n.Right.Type.Type, 0), Hconv(n.List, obj.FmtComma))
+ return f
+ }
+
+ f += fmt.Sprintf("(%v{ %v })", Nconv(n.Right, 0), Hconv(n.List, obj.FmtComma))
+ return f
+
+ case OPTRLIT:
+ if fmtmode == FExp && n.Left.Implicit != 0 {
+ f += fmt.Sprintf("%v", Nconv(n.Left, 0))
+ return f
+ }
+ f += fmt.Sprintf("&%v", Nconv(n.Left, 0))
+ return f
+
+ case OSTRUCTLIT:
+ if fmtmode == FExp { // requires special handling of field names
+ if n.Implicit != 0 {
+ f += "{"
+ } else {
+ f += fmt.Sprintf("(%v{", Tconv(n.Type, 0))
+ }
+ for l = n.List; l != nil; l = l.Next {
+ f += fmt.Sprintf(" %v:%v", Sconv(l.N.Left.Sym, obj.FmtShort|obj.FmtByte), Nconv(l.N.Right, 0))
+
+ if l.Next != nil {
+ f += ","
+ } else {
+ f += " "
+ }
+ }
+
+ if !(n.Implicit != 0) {
+ f += "})"
+ return f
+ }
+ f += "}"
+ return f
+ }
+ fallthrough
+
+ case OARRAYLIT,
+ OMAPLIT:
+ if fmtmode == FErr {
+ f += fmt.Sprintf("%v literal", Tconv(n.Type, 0))
+ return f
+ }
+ if fmtmode == FExp && n.Implicit != 0 {
+ f += fmt.Sprintf("{ %v }", Hconv(n.List, obj.FmtComma))
+ return f
+ }
+ f += fmt.Sprintf("(%v{ %v })", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
+ return f
+
+ case OKEY:
+ if n.Left != nil && n.Right != nil {
+ if fmtmode == FExp && n.Left.Type != nil && n.Left.Type.Etype == TFIELD {
+ // requires special handling of field names
+ f += fmt.Sprintf("%v:%v", Sconv(n.Left.Sym, obj.FmtShort|obj.FmtByte), Nconv(n.Right, 0))
+ return f
+ } else {
+ f += fmt.Sprintf("%v:%v", Nconv(n.Left, 0), Nconv(n.Right, 0))
+ return f
+ }
+ }
+
+ if !(n.Left != nil) && n.Right != nil {
+ f += fmt.Sprintf(":%v", Nconv(n.Right, 0))
+ return f
+ }
+ if n.Left != nil && !(n.Right != nil) {
+ f += fmt.Sprintf("%v:", Nconv(n.Left, 0))
+ return f
+ }
+ f += ":"
+ return f
+
+ case OXDOT,
+ ODOT,
+ ODOTPTR,
+ ODOTINTER,
+ ODOTMETH,
+ OCALLPART:
+ f += exprfmt(n.Left, nprec)
+ if n.Right == nil || n.Right.Sym == nil {
+ f += ".<nil>"
+ return f
+ }
+ f += fmt.Sprintf(".%v", Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
+ return f
+
+ case ODOTTYPE,
+ ODOTTYPE2:
+ f += exprfmt(n.Left, nprec)
+ if n.Right != nil {
+ f += fmt.Sprintf(".(%v)", Nconv(n.Right, 0))
+ return f
+ }
+ f += fmt.Sprintf(".(%v)", Tconv(n.Type, 0))
+ return f
+
+ case OINDEX,
+ OINDEXMAP,
+ OSLICE,
+ OSLICESTR,
+ OSLICEARR,
+ OSLICE3,
+ OSLICE3ARR:
+ f += exprfmt(n.Left, nprec)
+ f += fmt.Sprintf("[%v]", Nconv(n.Right, 0))
+ return f
+
+ case OCOPY,
+ OCOMPLEX:
+ f += fmt.Sprintf("%v(%v, %v)", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0), Nconv(n.Right, 0))
+ return f
+
+ case OCONV,
+ OCONVIFACE,
+ OCONVNOP,
+ OARRAYBYTESTR,
+ OARRAYRUNESTR,
+ OSTRARRAYBYTE,
+ OSTRARRAYRUNE,
+ ORUNESTR:
+ if n.Type == nil || n.Type.Sym == nil {
+ f += fmt.Sprintf("(%v)(%v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
+ return f
+ }
+ if n.Left != nil {
+ f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
+ return f
+ }
+ f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
+ return f
+
+ case OREAL,
+ OIMAG,
+ OAPPEND,
+ OCAP,
+ OCLOSE,
+ ODELETE,
+ OLEN,
+ OMAKE,
+ ONEW,
+ OPANIC,
+ ORECOVER,
+ OPRINT,
+ OPRINTN:
+ if n.Left != nil {
+ f += fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0))
+ return f
+ }
+ if n.Isddd != 0 {
+ f += fmt.Sprintf("%v(%v...)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+ return f
+ }
+ f += fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+ return f
+
+ case OCALL,
+ OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH:
+ f += exprfmt(n.Left, nprec)
+ if n.Isddd != 0 {
+ f += fmt.Sprintf("(%v...)", Hconv(n.List, obj.FmtComma))
+ return f
+ }
+ f += fmt.Sprintf("(%v)", Hconv(n.List, obj.FmtComma))
+ return f
+
+ case OMAKEMAP,
+ OMAKECHAN,
+ OMAKESLICE:
+ if n.List != nil { // pre-typecheck
+ f += fmt.Sprintf("make(%v, %v)", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
+ return f
+ }
+ if n.Right != nil {
+ f += fmt.Sprintf("make(%v, %v, %v)", Tconv(n.Type, 0), Nconv(n.Left, 0), Nconv(n.Right, 0))
+ return f
+ }
+ if n.Left != nil {
+ f += fmt.Sprintf("make(%v, %v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
+ return f
+ }
+ f += fmt.Sprintf("make(%v)", Tconv(n.Type, 0))
+ return f
+
+ // Unary
+ case OPLUS,
+ OMINUS,
+ OADDR,
+ OCOM,
+ OIND,
+ ONOT,
+ ORECV:
+ if n.Left.Op == n.Op {
+ f += fmt.Sprintf("%v ", Oconv(int(n.Op), obj.FmtSharp))
+ } else {
+ f += fmt.Sprintf("%v", Oconv(int(n.Op), obj.FmtSharp))
+ }
+ f += exprfmt(n.Left, nprec+1)
+ return f
+
+ // Binary
+ case OADD,
+ OAND,
+ OANDAND,
+ OANDNOT,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLT,
+ OLSH,
+ OMOD,
+ OMUL,
+ ONE,
+ OOR,
+ OOROR,
+ ORSH,
+ OSEND,
+ OSUB,
+ OXOR:
+ f += exprfmt(n.Left, nprec)
+
+ f += fmt.Sprintf(" %v ", Oconv(int(n.Op), obj.FmtSharp))
+ f += exprfmt(n.Right, nprec+1)
+ return f
+
+ case OADDSTR:
+ for l = n.List; l != nil; l = l.Next {
+ if l != n.List {
+ f += fmt.Sprintf(" + ")
+ }
+ f += exprfmt(l.N, nprec)
+ }
+
+ return f
+
+ case OCMPSTR,
+ OCMPIFACE:
+ f += exprfmt(n.Left, nprec)
+ f += fmt.Sprintf(" %v ", Oconv(int(n.Etype), obj.FmtSharp))
+ f += exprfmt(n.Right, nprec+1)
+ return f
+ }
+
+ f += fmt.Sprintf("<node %v>", Oconv(int(n.Op), 0))
+ return f
+}
+
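+// nodefmt formats n in the error, export and type-id modes,
+// dispatching to stmtfmt for statements and exprfmt otherwise.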
+func nodefmt(n *Node, flag int) string {
+ var f string
+
+ var t *Type
+
+ t = n.Type
+
+ // we almost always want the original, except in export mode for literals
+ // this saves the importer some work, and avoids us having to redo some
+ // special casing for package unsafe
+ if (fmtmode != FExp || n.Op != OLITERAL) && n.Orig != nil {
+ n = n.Orig
+ }
+
+ if flag&obj.FmtLong != 0 /*untyped*/ && t != nil {
+ if t.Etype == TNIL {
+ f += fmt.Sprintf("nil")
+ return f
+ } else {
+ f += fmt.Sprintf("%v (type %v)", Nconv(n, 0), Tconv(t, 0))
+ return f
+ }
+ }
+
+ // TODO inlining produces expressions with ninits. we can't print these yet.
+
+ if opprec[n.Op] < 0 {
+ return stmtfmt(n)
+ }
+
+ f += exprfmt(n, 0)
+ return f
+}
+
+var dumpdepth int
+
+func indent(s string) string {
+ return s + "\n" + strings.Repeat(". ", dumpdepth)
+}
+
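+// nodedump produces the recursive %+N debug dump of n, indenting by
+// dumpdepth and cutting the recursion off past depth 10.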
+func nodedump(n *Node, flag int) string {
+ var fp string
+
+ var recur int
+
+ if n == nil {
+ return fp
+ }
+
+ recur = bool2int(!(flag&obj.FmtShort != 0 /*untyped*/))
+
+ if recur != 0 {
+ fp = indent(fp)
+ if dumpdepth > 10 {
+ fp += "..."
+ return fp
+ }
+
+ if n.Ninit != nil {
+ fp += fmt.Sprintf("%v-init%v", Oconv(int(n.Op), 0), Hconv(n.Ninit, 0))
+ fp = indent(fp)
+ }
+ }
+
+ // fmtprint(fp, "[%p]", n);
+
+ switch n.Op {
+ default:
+ fp += fmt.Sprintf("%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
+
+ case OREGISTER,
+ OINDREG:
+ fp += fmt.Sprintf("%v-%v%v", Oconv(int(n.Op), 0), Ctxt.Rconv(int(n.Val.U.Reg)), Jconv(n, 0))
+
+ case OLITERAL:
+ fp += fmt.Sprintf("%v-%v%v", Oconv(int(n.Op), 0), Vconv(&n.Val, 0), Jconv(n, 0))
+
+ case ONAME,
+ ONONAME:
+ if n.Sym != nil {
+ fp += fmt.Sprintf("%v-%v%v", Oconv(int(n.Op), 0), Sconv(n.Sym, 0), Jconv(n, 0))
+ } else {
+ fp += fmt.Sprintf("%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
+ }
+ if recur != 0 && n.Type == nil && n.Ntype != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
+ }
+
+ case OASOP:
+ fp += fmt.Sprintf("%v-%v%v", Oconv(int(n.Op), 0), Oconv(int(n.Etype), 0), Jconv(n, 0))
+
+ case OTYPE:
+ fp += fmt.Sprintf("%v %v%v type=%v", Oconv(int(n.Op), 0), Sconv(n.Sym, 0), Jconv(n, 0), Tconv(n.Type, 0))
+ if recur != 0 && n.Type == nil && n.Ntype != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
+ }
+ }
+
+ if n.Sym != nil && n.Op != ONAME {
+ fp += fmt.Sprintf(" %v G%d", Sconv(n.Sym, 0), n.Vargen)
+ }
+
+ if n.Type != nil {
+ fp += fmt.Sprintf(" %v", Tconv(n.Type, 0))
+ }
+
+ if recur != 0 {
+ if n.Left != nil {
+ fp += fmt.Sprintf("%v", Nconv(n.Left, 0))
+ }
+ if n.Right != nil {
+ fp += fmt.Sprintf("%v", Nconv(n.Right, 0))
+ }
+ if n.List != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-list%v", Oconv(int(n.Op), 0), Hconv(n.List, 0))
+ }
+
+ if n.Rlist != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-rlist%v", Oconv(int(n.Op), 0), Hconv(n.Rlist, 0))
+ }
+
+ if n.Ntest != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-test%v", Oconv(int(n.Op), 0), Nconv(n.Ntest, 0))
+ }
+
+ if n.Nbody != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-body%v", Oconv(int(n.Op), 0), Hconv(n.Nbody, 0))
+ }
+
+ if n.Nelse != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-else%v", Oconv(int(n.Op), 0), Hconv(n.Nelse, 0))
+ }
+
+ if n.Nincr != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-incr%v", Oconv(int(n.Op), 0), Nconv(n.Nincr, 0))
+ }
+ }
+
+ return fp
+}
+
+// Fmt "%S": syms
+// Flags: "%hS" suppresses qualifying with package
+func Sconv(s *Sym, flag int) string {
+ var fp string
+
+ var sm int
+ var sf int
+
+ if flag&obj.FmtLong != 0 /*untyped*/ {
+ panic("linksymfmt")
+ }
+
+ if s == nil {
+ fp += "<S>"
+ return fp
+ }
+
+ if s.Name == "_" {
+ fp += "_"
+ return fp
+ }
+
+ sf = flag
+ sm = setfmode(&flag)
+ str := symfmt(s, flag)
+ flag = sf
+ fmtmode = sm
+ return str
+}
+
+// Fmt "%T": types.
+// Flags: 'l' print definition, not name
+// 'h' omit 'func' and receiver from function types, short type names
+// 'u' package name, not prefix (FTypeId mode, sticky)
+func Tconv(t *Type, flag int) string {
+ var fp string
+
+ var sm int
+ var sf int
+
+ if t == nil {
+ fp += "<T>"
+ return fp
+ }
+
+ if t.Trecur > 4 {
+ fp += "<...>"
+ return fp
+ }
+
+ t.Trecur++
+ sf = flag
+ sm = setfmode(&flag)
+
+ if fmtmode == FTypeId && (sf&obj.FmtUnsigned != 0) {
+ fmtpkgpfx++
+ }
+ if fmtpkgpfx != 0 {
+ flag |= obj.FmtUnsigned
+ }
+
+ str := typefmt(t, flag)
+
+ if fmtmode == FTypeId && (sf&obj.FmtUnsigned != 0) {
+ fmtpkgpfx--
+ }
+
+ flag = sf
+ fmtmode = sm
+ t.Trecur--
+ return str
+}
+
+// Fmt '%N': Nodes.
+// Flags: 'l' suffix with "(type %T)" where possible
+// '+h' in debug mode, don't recurse, no multiline output
+func Nconv(n *Node, flag int) string {
+ var fp string
+
+ var sm int
+ var sf int
+
+ if n == nil {
+ fp += "<N>"
+ return fp
+ }
+ sf = flag
+ sm = setfmode(&flag)
+
+ var str string
+ switch fmtmode {
+ case FErr,
+ FExp:
+ str = nodefmt(n, flag)
+
+ case FDbg:
+ dumpdepth++
+ str = nodedump(n, flag)
+ dumpdepth--
+
+ default:
+ Fatal("unhandled %N mode")
+ }
+
+ flag = sf
+ fmtmode = sm
+ return str
+}
+
+// Fmt '%H': NodeList.
+// Flags: all those of %N plus ',': separate with commas instead of semicolons.
+func Hconv(l *NodeList, flag int) string {
+ var fp string
+
+ var sm int
+ var sf int
+ var sep string
+
+ if l == nil && fmtmode == FDbg {
+ fp += "<nil>"
+ return fp
+ }
+
+ sf = flag
+ sm = setfmode(&flag)
+ sep = "; "
+ if fmtmode == FDbg {
+ sep = "\n"
+ } else if flag&obj.FmtComma != 0 /*untyped*/ {
+ sep = ", "
+ }
+
+ for ; l != nil; l = l.Next {
+ fp += fmt.Sprintf("%v", Nconv(l.N, 0))
+ if l.Next != nil {
+ fp += sep
+ }
+ }
+
+ flag = sf
+ fmtmode = sm
+ return fp
+}
+
+func dumplist(s string, l *NodeList) {
+ fmt.Printf("%s%v\n", s, Hconv(l, obj.FmtSign))
+}
+
+func Dump(s string, n *Node) {
+ fmt.Printf("%s [%p]%v\n", s, n, Nconv(n, obj.FmtSign))
+}
diff --git a/src/cmd/internal/gc/gen.go b/src/cmd/internal/gc/gen.go
new file mode 100644
index 0000000000..a40a347285
--- /dev/null
+++ b/src/cmd/internal/gc/gen.go
@@ -0,0 +1,1017 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+/*
+ * portable half of code generator.
+ * mainly statements and control flow.
+ */
+var labellist *Label
+
+var lastlabel *Label
+
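+// Sysfunc returns a node referring to the runtime function with the
+// given name.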
+func Sysfunc(name string) *Node {
+ var n *Node
+
+ n = newname(Pkglookup(name, Runtimepkg))
+ n.Class = PFUNC
+ return n
+}
+
+/*
+ * the address of n has been taken and might be used after
+ * the current function returns. mark any local vars
+ * as needing to move to the heap.
+ */
+func addrescapes(n *Node) {
+ var buf string
+ var oldfn *Node
+
+ switch n.Op {
+ // probably a type error already.
+ // dump("addrescapes", n);
+ default:
+ break
+
+ case ONAME:
+ if n == nodfp {
+ break
+ }
+
+ // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
+ // on PPARAM it means something different.
+ if n.Class == PAUTO && n.Esc == EscNever {
+ break
+ }
+
+ switch n.Class {
+ case PPARAMREF:
+ addrescapes(n.Defn)
+
+ // if func param, need separate temporary
+ // to hold heap pointer.
+ // the function type has already been checked
+ // (we're in the function body)
+ // so the param already has a valid xoffset.
+
+ // expression to refer to stack copy
+ case PPARAM,
+ PPARAMOUT:
+ n.Stackparam = Nod(OPARAM, n, nil)
+
+ n.Stackparam.Type = n.Type
+ n.Stackparam.Addable = 1
+ if n.Xoffset == BADWIDTH {
+ Fatal("addrescapes before param assignment")
+ }
+ n.Stackparam.Xoffset = n.Xoffset
+ fallthrough
+
+ case PAUTO:
+ n.Class |= PHEAP
+
+ n.Addable = 0
+ n.Ullman = 2
+ n.Xoffset = 0
+
+ // create stack variable to hold pointer to heap
+ oldfn = Curfn
+
+ Curfn = n.Curfn
+ n.Heapaddr = temp(Ptrto(n.Type))
+ buf = fmt.Sprintf("&%v", Sconv(n.Sym, 0))
+ n.Heapaddr.Sym = Lookup(buf)
+ n.Heapaddr.Orig.Sym = n.Heapaddr.Sym
+ n.Esc = EscHeap
+ if Debug['m'] != 0 {
+ fmt.Printf("%v: moved to heap: %v\n", n.Line(), Nconv(n, 0))
+ }
+ Curfn = oldfn
+ }
+
+ case OIND,
+ ODOTPTR:
+ break
+
+ // ODOTPTR has already been introduced,
+ // so these are the non-pointer ODOT and OINDEX.
+ // In &x[0], if x is a slice, then x does not
+ // escape--the pointer inside x does, but that
+ // is always a heap pointer anyway.
+ case ODOT,
+ OINDEX:
+ if !(Isslice(n.Left.Type) != 0) {
+ addrescapes(n.Left)
+ }
+ }
+}
+
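+// clearlabels detaches and resets the label list accumulated for the
+// previous function.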
+func clearlabels() {
+ var l *Label
+
+ for l = labellist; l != nil; l = l.Link {
+ l.Sym.Label = nil
+ }
+
+ labellist = nil
+ lastlabel = nil
+}
+
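+// newlab returns the Label for the label or goto node n, creating it
+// if needed and recording the definition or use.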
+func newlab(n *Node) *Label {
+ var s *Sym
+ var lab *Label
+
+ s = n.Left.Sym
+ lab = s.Label
+ if lab == nil {
+ lab = new(Label)
+ if lastlabel == nil {
+ labellist = lab
+ } else {
+ lastlabel.Link = lab
+ }
+ lastlabel = lab
+ lab.Sym = s
+ s.Label = lab
+ }
+
+ if n.Op == OLABEL {
+ if lab.Def != nil {
+ Yyerror("label %v already defined at %v", Sconv(s, 0), lab.Def.Line())
+ } else {
+ lab.Def = n
+ }
+ } else {
+ lab.Use = list(lab.Use, n)
+ }
+
+ return lab
+}
+
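+// checkgoto reports an error when the goto at from would jump into a
+// block or across a declaration on the way to the label at to. The
+// Sym fields record the enclosing declaration chains, so the check
+// walks both back to their common suffix.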
+func checkgoto(from *Node, to *Node) {
+ var nf int
+ var nt int
+ var block *Sym
+ var dcl *Sym
+ var fs *Sym
+ var ts *Sym
+ var lno int
+
+ if from.Sym == to.Sym {
+ return
+ }
+
+ nf = 0
+ for fs = from.Sym; fs != nil; fs = fs.Link {
+ nf++
+ }
+ nt = 0
+ for fs = to.Sym; fs != nil; fs = fs.Link {
+ nt++
+ }
+ fs = from.Sym
+ for ; nf > nt; nf-- {
+ fs = fs.Link
+ }
+ if fs != to.Sym {
+ lno = int(lineno)
+ setlineno(from)
+
+ // decide what to complain about.
+ // prefer to complain about 'into block' over declarations,
+ // so scan backward to find most recent block or else dcl.
+ block = nil
+
+ dcl = nil
+ ts = to.Sym
+ for ; nt > nf; nt-- {
+ if ts.Pkg == nil {
+ block = ts
+ } else {
+ dcl = ts
+ }
+ ts = ts.Link
+ }
+
+ for ts != fs {
+ if ts.Pkg == nil {
+ block = ts
+ } else {
+ dcl = ts
+ }
+ ts = ts.Link
+ fs = fs.Link
+ }
+
+ if block != nil {
+ Yyerror("goto %v jumps into block starting at %v", Sconv(from.Left.Sym, 0), Ctxt.Line(int(block.Lastlineno)))
+ } else {
+ Yyerror("goto %v jumps over declaration of %v at %v", Sconv(from.Left.Sym, 0), Sconv(dcl, 0), Ctxt.Line(int(dcl.Lastlineno)))
+ }
+ lineno = int32(lno)
+ }
+}
+
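+// stmtlabel returns the label attached to the breakable statement n,
+// or nil if there is none.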
+func stmtlabel(n *Node) *Label {
+ var lab *Label
+
+ if n.Sym != nil {
+ lab = n.Sym.Label
+ if lab != nil {
+ if lab.Def != nil {
+ if lab.Def.Defn == n {
+ return lab
+ }
+ }
+ }
+ }
+ return nil
+}
+
+/*
+ * compile statements
+ */
+func Genlist(l *NodeList) {
+ for ; l != nil; l = l.Next {
+ gen(l.N)
+ }
+}
+
+/*
+ * generate code to start a new proc running call n.
+ */
+func cgen_proc(n *Node, proc int) {
+ switch n.Left.Op {
+ default:
+ Fatal("cgen_proc: unknown call %v", Oconv(int(n.Left.Op), 0))
+ fallthrough
+
+ case OCALLMETH:
+ Cgen_callmeth(n.Left, proc)
+
+ case OCALLINTER:
+ Thearch.Cgen_callinter(n.Left, nil, proc)
+
+ case OCALLFUNC:
+ Thearch.Cgen_call(n.Left, proc)
+ }
+}
+
+/*
+ * generate declaration.
+ * have to allocate heap copy
+ * for escaped variables.
+ */
+func cgen_dcl(n *Node) {
+ if Debug['g'] != 0 {
+ Dump("\ncgen-dcl", n)
+ }
+ if n.Op != ONAME {
+ Dump("cgen_dcl", n)
+ Fatal("cgen_dcl")
+ }
+
+ if !(n.Class&PHEAP != 0) {
+ return
+ }
+ if compiling_runtime != 0 {
+ Yyerror("%v escapes to heap, not allowed in runtime.", Nconv(n, 0))
+ }
+ if n.Alloc == nil {
+ n.Alloc = callnew(n.Type)
+ }
+ Cgen_as(n.Heapaddr, n.Alloc)
+}
+
+/*
+ * generate discard of value
+ */
+func cgen_discard(nr *Node) {
+ var tmp Node
+
+ if nr == nil {
+ return
+ }
+
+ switch nr.Op {
+ case ONAME:
+ if !(nr.Class&PHEAP != 0) && nr.Class != PEXTERN && nr.Class != PFUNC && nr.Class != PPARAMREF {
+ gused(nr)
+ }
+
+	// binary
+ case OADD,
+ OAND,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLSH,
+ OLT,
+ OMOD,
+ OMUL,
+ ONE,
+ OOR,
+ ORSH,
+ OSUB,
+ OXOR:
+		cgen_discard(nr.Left)
+		cgen_discard(nr.Right)
+
+	// unary
+ case OCAP,
+ OCOM,
+ OLEN,
+ OMINUS,
+ ONOT,
+ OPLUS:
+ cgen_discard(nr.Left)
+
+ case OIND:
+ Cgen_checknil(nr.Left)
+
+ // special enough to just evaluate
+ default:
+ Tempname(&tmp, nr.Type)
+
+ Cgen_as(&tmp, nr)
+ gused(&tmp)
+ }
+}
+
+/*
+ * clearslim generates code to zero a slim node.
+ */
+func Clearslim(n *Node) {
+ var z Node
+ var zero Mpflt
+
+ z = Node{}
+ z.Op = OLITERAL
+ z.Type = n.Type
+ z.Addable = 1
+
+ switch Simtype[n.Type.Etype] {
+ case TCOMPLEX64,
+ TCOMPLEX128:
+ z.Val.U.Cval = new(Mpcplx)
+ Mpmovecflt(&z.Val.U.Cval.Real, 0.0)
+ Mpmovecflt(&z.Val.U.Cval.Imag, 0.0)
+
+ case TFLOAT32,
+ TFLOAT64:
+ Mpmovecflt(&zero, 0.0)
+ z.Val.Ctype = CTFLT
+ z.Val.U.Fval = &zero
+
+ case TPTR32,
+ TPTR64,
+ TCHAN,
+ TMAP:
+ z.Val.Ctype = CTNIL
+
+ case TBOOL:
+ z.Val.Ctype = CTBOOL
+
+ case TINT8,
+ TINT16,
+ TINT32,
+ TINT64,
+ TUINT8,
+ TUINT16,
+ TUINT32,
+ TUINT64:
+ z.Val.Ctype = CTINT
+ z.Val.U.Xval = new(Mpint)
+ Mpmovecfix(z.Val.U.Xval, 0)
+
+ default:
+ Fatal("clearslim called on type %v", Tconv(n.Type, 0))
+ }
+
+ ullmancalc(&z)
+ Thearch.Cgen(&z, n)
+}
+
+/*
+ * generate:
+ * res = iface{typ, data}
+ * n->left is typ
+ * n->right is data
+ */
+func Cgen_eface(n *Node, res *Node) {
+ var dst Node
+ /*
+	 * the right node of an eface may contain function calls that use res as an argument,
+	 * so it's important that it is evaluated first
+ */
+
+ var tmp *Node
+
+ tmp = temp(Types[Tptr])
+ Thearch.Cgen(n.Right, tmp)
+
+ Gvardef(res)
+
+ dst = *res
+ dst.Type = Types[Tptr]
+ dst.Xoffset += int64(Widthptr)
+ Thearch.Cgen(tmp, &dst)
+
+ dst.Xoffset -= int64(Widthptr)
+ Thearch.Cgen(n.Left, &dst)
+}
+
+/*
+ * generate:
+ * res = s[lo, hi];
+ * n->left is s
+ * n->list is (cap(s)-lo(TUINT), hi-lo(TUINT)[, lo*width(TUINTPTR)])
+ * caller (cgen) guarantees res is an addable ONAME.
+ *
+ * called for OSLICE, OSLICE3, OSLICEARR, OSLICE3ARR, OSLICESTR.
+ */
+func Cgen_slice(n *Node, res *Node) {
+ var src Node
+ var dst Node
+ var cap *Node
+ var len *Node
+ var offs *Node
+ var add *Node
+ var base *Node
+ var tmpcap *Node
+ var tmplen *Node
+ var cmp *Node
+ var con Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ cap = n.List.N
+ len = n.List.Next.N
+ offs = nil
+ if n.List.Next.Next != nil {
+ offs = n.List.Next.Next.N
+ }
+
+ // evaluate base pointer first, because it is the only
+ // possibly complex expression. once that is evaluated
+ // and stored, updating the len and cap can be done
+ // without making any calls, so without doing anything that
+ // might cause preemption or garbage collection.
+ // this makes the whole slice update atomic as far as the
+ // garbage collector can see.
+ base = temp(Types[TUINTPTR])
+
+ tmplen = temp(Types[TINT])
+ if n.Op != OSLICESTR {
+ tmpcap = temp(Types[TINT])
+ } else {
+ tmpcap = tmplen
+ }
+
+ if isnil(n.Left) != 0 {
+ Tempname(&src, n.Left.Type)
+ Thearch.Cgen(n.Left, &src)
+ } else {
+ src = *n.Left
+ }
+ if n.Op == OSLICE || n.Op == OSLICE3 || n.Op == OSLICESTR {
+ src.Xoffset += int64(Array_array)
+ }
+
+ if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
+ if !(Isptr[n.Left.Type.Etype] != 0) {
+ Fatal("slicearr is supposed to work on pointer: %v\n", Nconv(n, obj.FmtSign))
+ }
+ Thearch.Cgen(&src, base)
+ Cgen_checknil(base)
+ } else {
+ src.Type = Types[Tptr]
+ Thearch.Cgen(&src, base)
+ }
+
+ // committed to the update
+ Gvardef(res)
+
+ // compute len and cap.
+ // len = n-i, cap = m-i, and offs = i*width.
+ // computing offs last lets the multiply overwrite i.
+ Thearch.Cgen((*Node)(len), tmplen)
+
+ if n.Op != OSLICESTR {
+ Thearch.Cgen(cap, tmpcap)
+ }
+
+ // if new cap != 0 { base += add }
+ // This avoids advancing base past the end of the underlying array/string,
+ // so that it cannot point at the next object in memory.
+ // If cap == 0, the base doesn't matter except insofar as it is 0 or non-zero.
+ // In essence we are replacing x[i:j:k] where i == j == k
+ // or x[i:j] where i == j == cap(x) with x[0:0:0].
+ if offs != nil {
+ p1 = gjmp(nil)
+ p2 = gjmp(nil)
+ Patch(p1, Pc)
+
+ Nodconst(&con, tmpcap.Type, 0)
+ cmp = Nod(OEQ, tmpcap, &con)
+ typecheck(&cmp, Erv)
+ Thearch.Bgen(cmp, true, -1, p2)
+
+ add = Nod(OADD, base, offs)
+ typecheck(&add, Erv)
+ Thearch.Cgen(add, base)
+
+ Patch(p2, Pc)
+ }
+
+ // dst.array = src.array [ + lo *width ]
+ dst = *res
+
+ dst.Xoffset += int64(Array_array)
+ dst.Type = Types[Tptr]
+ Thearch.Cgen(base, &dst)
+
+ // dst.len = hi [ - lo ]
+ dst = *res
+
+ dst.Xoffset += int64(Array_nel)
+ dst.Type = Types[Simtype[TUINT]]
+ Thearch.Cgen(tmplen, &dst)
+
+ if n.Op != OSLICESTR {
+ // dst.cap = cap [ - lo ]
+ dst = *res
+
+ dst.Xoffset += int64(Array_cap)
+ dst.Type = Types[Simtype[TUINT]]
+ Thearch.Cgen(tmpcap, &dst)
+ }
+}
+
+/*
+ * gather series of offsets
+ * >=0 is direct addressed field
+ * <0 is pointer to next field (+1)
+ */
+func Dotoffset(n *Node, oary []int64, nn **Node) int {
+ var i int
+
+ switch n.Op {
+ case ODOT:
+ if n.Xoffset == BADWIDTH {
+ Dump("bad width in dotoffset", n)
+ Fatal("bad width in dotoffset")
+ }
+
+ i = Dotoffset(n.Left, oary, nn)
+ if i > 0 {
+ if oary[i-1] >= 0 {
+ oary[i-1] += n.Xoffset
+ } else {
+ oary[i-1] -= n.Xoffset
+ }
+ break
+ }
+
+ if i < 10 {
+ oary[i] = n.Xoffset
+ i++
+ }
+
+ case ODOTPTR:
+ if n.Xoffset == BADWIDTH {
+ Dump("bad width in dotoffset", n)
+ Fatal("bad width in dotoffset")
+ }
+
+ i = Dotoffset(n.Left, oary, nn)
+ if i < 10 {
+ oary[i] = -(n.Xoffset + 1)
+ i++
+ }
+
+ default:
+ *nn = n
+ return 0
+ }
+
+ if i >= 10 {
+ *nn = nil
+ }
+ return i
+}
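+
+// Illustration (not part of the original source): for a selector a.b.c in
+// which b is a plain field of a and c is reached through the pointer b,
+// Dotoffset fills oary with offsetof(b) (>= 0, a direct field) followed by
+// -(offsetof(c)+1), the negative value recording that a pointer must be
+// loaded before the next offset is applied.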
+
+/*
+ * make a new temporary, off the books
+ */
+func Tempname(nn *Node, t *Type) {
+ var n *Node
+ var s *Sym
+
+ if Curfn == nil {
+ Fatal("no curfn for tempname")
+ }
+
+ if t == nil {
+ Yyerror("tempname called with nil type")
+ t = Types[TINT32]
+ }
+
+	// give each tmp a different name so that there is
+	// a chance to registerize them
+ namebuf = fmt.Sprintf("autotmp_%.4d", statuniqgen)
+
+ statuniqgen++
+ s = Lookup(namebuf)
+ n = Nod(ONAME, nil, nil)
+ n.Sym = s
+ s.Def = n
+ n.Type = t
+ n.Class = PAUTO
+ n.Addable = 1
+ n.Ullman = 1
+ n.Esc = EscNever
+ n.Curfn = Curfn
+ Curfn.Dcl = list(Curfn.Dcl, n)
+
+ dowidth(t)
+ n.Xoffset = 0
+ *nn = *n
+}
+
+func temp(t *Type) *Node {
+ var n *Node
+
+ n = Nod(OXXX, nil, nil)
+ Tempname(n, t)
+ n.Sym.Def.Used = 1
+ return n.Orig
+}
+
+func gen(n *Node) {
+ var lno int32
+ var scontin *obj.Prog
+ var sbreak *obj.Prog
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var lab *Label
+ var wasregalloc int32
+
+ //dump("gen", n);
+
+ lno = setlineno(n)
+
+ wasregalloc = int32(Thearch.Anyregalloc())
+
+ if n == nil {
+ goto ret
+ }
+
+ if n.Ninit != nil {
+ Genlist(n.Ninit)
+ }
+
+ setlineno(n)
+
+ switch n.Op {
+ default:
+ Fatal("gen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ case OCASE,
+ OFALL,
+ OXCASE,
+ OXFALL,
+ ODCLCONST,
+ ODCLFUNC,
+ ODCLTYPE:
+ break
+
+ case OEMPTY:
+ break
+
+ case OBLOCK:
+ Genlist(n.List)
+
+ case OLABEL:
+ if isblanksym(n.Left.Sym) {
+ break
+ }
+
+ lab = newlab(n)
+
+ // if there are pending gotos, resolve them all to the current pc.
+ for p1 = lab.Gotopc; p1 != nil; p1 = p2 {
+ p2 = unpatch(p1)
+ Patch(p1, Pc)
+ }
+
+ lab.Gotopc = nil
+ if lab.Labelpc == nil {
+ lab.Labelpc = Pc
+ }
+
+ if n.Defn != nil {
+ switch n.Defn.Op {
+ // so stmtlabel can find the label
+ case OFOR,
+ OSWITCH,
+ OSELECT:
+ n.Defn.Sym = lab.Sym
+ }
+ }
+
+ // if label is defined, emit jump to it.
+ // otherwise save list of pending gotos in lab->gotopc.
+ // the list is linked through the normal jump target field
+ // to avoid a second list. (the jumps are actually still
+ // valid code, since they're just going to another goto
+ // to the same label. we'll unwind it when we learn the pc
+ // of the label in the OLABEL case above.)
+ case OGOTO:
+ lab = newlab(n)
+
+ if lab.Labelpc != nil {
+ gjmp(lab.Labelpc)
+ } else {
+ lab.Gotopc = gjmp(lab.Gotopc)
+ }
+
+ case OBREAK:
+ if n.Left != nil {
+ lab = n.Left.Sym.Label
+ if lab == nil {
+ Yyerror("break label not defined: %v", Sconv(n.Left.Sym, 0))
+ break
+ }
+
+ lab.Used = 1
+ if lab.Breakpc == nil {
+ Yyerror("invalid break label %v", Sconv(n.Left.Sym, 0))
+ break
+ }
+
+ gjmp(lab.Breakpc)
+ break
+ }
+
+ if breakpc == nil {
+ Yyerror("break is not in a loop")
+ break
+ }
+
+ gjmp(breakpc)
+
+ case OCONTINUE:
+ if n.Left != nil {
+ lab = n.Left.Sym.Label
+ if lab == nil {
+ Yyerror("continue label not defined: %v", Sconv(n.Left.Sym, 0))
+ break
+ }
+
+ lab.Used = 1
+ if lab.Continpc == nil {
+ Yyerror("invalid continue label %v", Sconv(n.Left.Sym, 0))
+ break
+ }
+
+ gjmp(lab.Continpc)
+ break
+ }
+
+ if continpc == nil {
+ Yyerror("continue is not in a loop")
+ break
+ }
+
+ gjmp(continpc)
+
+ case OFOR:
+ sbreak = breakpc
+ p1 = gjmp(nil) // goto test
+ breakpc = gjmp(nil) // break: goto done
+ scontin = continpc
+ continpc = Pc
+
+ // define break and continue labels
+ lab = stmtlabel(n)
+ if lab != nil {
+ lab.Breakpc = breakpc
+ lab.Continpc = continpc
+ }
+
+ gen(n.Nincr) // contin: incr
+ Patch(p1, Pc) // test:
+ Thearch.Bgen(n.Ntest, false, -1, breakpc) // if(!test) goto break
+ Genlist(n.Nbody) // body
+ gjmp(continpc)
+ Patch(breakpc, Pc) // done:
+ continpc = scontin
+ breakpc = sbreak
+ if lab != nil {
+ lab.Breakpc = nil
+ lab.Continpc = nil
+ }
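+
+		// Illustration (not part of the original source): the jumps and
+		// patches above lay out a loop "for ; test; incr { body }" as
+		//
+		//		JMP test
+		//	break:	JMP done
+		//	contin:	incr
+		//	test:	if !test goto break
+		//		body
+		//		JMP contin
+		//	done: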
+
+ case OIF:
+ p1 = gjmp(nil) // goto test
+ p2 = gjmp(nil) // p2: goto else
+ Patch(p1, Pc) // test:
+ Thearch.Bgen(n.Ntest, false, int(-n.Likely), p2) // if(!test) goto p2
+ Genlist(n.Nbody) // then
+ p3 = gjmp(nil) // goto done
+ Patch(p2, Pc) // else:
+ Genlist(n.Nelse) // else
+ Patch(p3, Pc) // done:
+
+ case OSWITCH:
+ sbreak = breakpc
+ p1 = gjmp(nil) // goto test
+ breakpc = gjmp(nil) // break: goto done
+
+ // define break label
+ lab = stmtlabel(n)
+ if lab != nil {
+ lab.Breakpc = breakpc
+ }
+
+ Patch(p1, Pc) // test:
+ Genlist(n.Nbody) // switch(test) body
+ Patch(breakpc, Pc) // done:
+ breakpc = sbreak
+ if lab != nil {
+ lab.Breakpc = nil
+ }
+
+ case OSELECT:
+ sbreak = breakpc
+ p1 = gjmp(nil) // goto test
+ breakpc = gjmp(nil) // break: goto done
+
+ // define break label
+ lab = stmtlabel(n)
+ if lab != nil {
+ lab.Breakpc = breakpc
+ }
+
+ Patch(p1, Pc) // test:
+ Genlist(n.Nbody) // select() body
+ Patch(breakpc, Pc) // done:
+ breakpc = sbreak
+ if lab != nil {
+ lab.Breakpc = nil
+ }
+
+ case ODCL:
+ cgen_dcl(n.Left)
+
+ case OAS:
+ if gen_as_init(n) != 0 {
+ break
+ }
+ Cgen_as(n.Left, n.Right)
+
+ case OCALLMETH:
+ Cgen_callmeth(n, 0)
+
+ case OCALLINTER:
+ Thearch.Cgen_callinter(n, nil, 0)
+
+ case OCALLFUNC:
+ Thearch.Cgen_call(n, 0)
+
+ case OPROC:
+ cgen_proc(n, 1)
+
+ case ODEFER:
+ cgen_proc(n, 2)
+
+ case ORETURN,
+ ORETJMP:
+ Thearch.Cgen_ret(n)
+
+ case OCHECKNIL:
+ Cgen_checknil(n.Left)
+
+ case OVARKILL:
+ gvarkill(n.Left)
+ }
+
+ret:
+ if int32(Thearch.Anyregalloc()) != wasregalloc {
+ Dump("node", n)
+ Fatal("registers left allocated")
+ }
+
+ lineno = lno
+}
+
+func Cgen_as(nl *Node, nr *Node) {
+ var tl *Type
+
+ if Debug['g'] != 0 {
+ Dump("cgen_as", nl)
+ Dump("cgen_as = ", nr)
+ }
+
+ for nr != nil && nr.Op == OCONVNOP {
+ nr = nr.Left
+ }
+
+ if nl == nil || isblank(nl) {
+ cgen_discard(nr)
+ return
+ }
+
+ if nr == nil || iszero(nr) != 0 {
+ // heaps should already be clear
+ if nr == nil && (nl.Class&PHEAP != 0) {
+ return
+ }
+
+ tl = nl.Type
+ if tl == nil {
+ return
+ }
+ if Isfat(tl) != 0 {
+ if nl.Op == ONAME {
+ Gvardef(nl)
+ }
+ Thearch.Clearfat(nl)
+ return
+ }
+
+ Clearslim(nl)
+ return
+ }
+
+ tl = nl.Type
+ if tl == nil {
+ return
+ }
+
+ Thearch.Cgen(nr, nl)
+}
+
+func Cgen_callmeth(n *Node, proc int) {
+ var n2 Node
+ var l *Node
+
+ // generate a rewrite in n2 for the method call
+ // (p.f)(...) goes to (f)(p,...)
+
+ l = n.Left
+
+ if l.Op != ODOTMETH {
+		Fatal("cgen_callmeth: not dotmethod: %v", Nconv(n, 0))
+ }
+
+ n2 = *n
+ n2.Op = OCALLFUNC
+ n2.Left = l.Right
+ n2.Left.Type = l.Type
+
+ if n2.Left.Op == ONAME {
+ n2.Left.Class = PFUNC
+ }
+ Thearch.Cgen_call(&n2, proc)
+}
+
+func checklabels() {
+ var lab *Label
+ var l *NodeList
+
+ for lab = labellist; lab != nil; lab = lab.Link {
+ if lab.Def == nil {
+ for l = lab.Use; l != nil; l = l.Next {
+ yyerrorl(int(l.N.Lineno), "label %v not defined", Sconv(lab.Sym, 0))
+ }
+ continue
+ }
+
+ if lab.Use == nil && !(lab.Used != 0) {
+ yyerrorl(int(lab.Def.Lineno), "label %v defined and not used", Sconv(lab.Sym, 0))
+ continue
+ }
+
+ if lab.Gotopc != nil {
+ Fatal("label %v never resolved", Sconv(lab.Sym, 0))
+ }
+ for l = lab.Use; l != nil; l = l.Next {
+ checkgoto(l.N, lab.Def)
+ }
+ }
+}
diff --git a/src/cmd/internal/gc/go.go b/src/cmd/internal/gc/go.go
new file mode 100644
index 0000000000..ff1a42969c
--- /dev/null
+++ b/src/cmd/internal/gc/go.go
@@ -0,0 +1,1179 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "encoding/binary"
+)
+
+// avoid <ctype.h>
+
+// Carried over from the C sources, where this comment described the
+// parser's maximum stack size: it had to be a #define macro because yacc
+// or bison checks for its definition and uses a potentially smaller
+// value if it is undefined. It does not describe the constants below.
+const (
+ NHUNK = 50000
+ BUFSIZ = 8192
+ NSYMB = 500
+ NHASH = 1024
+ STRINGSZ = 200
+ MAXALIGN = 7
+ UINF = 100
+ PRIME1 = 3
+ AUNK = 100
+ AMEM = 0 + iota - 9
+ AMEM0
+ AMEM8
+ AMEM16
+ AMEM32
+ AMEM64
+ AMEM128
+ ANOEQ
+ ANOEQ0
+ ANOEQ8
+ ANOEQ16
+ ANOEQ32
+ ANOEQ64
+ ANOEQ128
+ ASTRING
+ AINTER
+ ANILINTER
+ ASLICE
+ AFLOAT32
+ AFLOAT64
+ ACPLX64
+ ACPLX128
+ BADWIDTH = -1000000000
+ MaxStackVarSize = 10 * 1024 * 1024
+)
+
+/*
+ * note this is the representation
+ * of the compiler's string literals;
+ * it is not the runtime representation
+ */
+type Strlit struct {
+ S string
+}
+
+const (
+ Mpscale = 29
+ Mpprec = 16
+ Mpnorm = Mpprec - 1
+ Mpbase = 1 << Mpscale
+ Mpsign = Mpbase >> 1
+ Mpmask = Mpbase - 1
+ Mpdebug = 0
+)
+
+type Mpint struct {
+ A [Mpprec]int
+ Neg uint8
+ Ovf uint8
+}
+
+type Mpflt struct {
+ Val Mpint
+ Exp int16
+}
+
+type Mpcplx struct {
+ Real Mpflt
+ Imag Mpflt
+}
+
+type Val struct {
+ Ctype int16
+ U struct {
+ Reg int16
+ Bval int16
+ Xval *Mpint
+ Fval *Mpflt
+ Cval *Mpcplx
+ Sval *Strlit
+ }
+}
+
+type Array struct {
+ length int32
+ size int32
+ capacity int32
+ data string
+}
+
+type Bvec struct {
+ n int32
+ b []uint32
+}
+
+type Pkg struct {
+ Name string
+ Path *Strlit
+ Pathsym *Sym
+ Prefix string
+ Link *Pkg
+ Imported uint8
+ Exported int8
+ Direct int8
+ Safe bool
+}
+
+type Sym struct {
+ Lexical uint16
+ Flags uint8
+ Sym uint8
+ Link *Sym
+ Npkg int32
+ Uniqgen uint32
+ Importdef *Pkg
+ Linkname string
+ Pkg *Pkg
+ Name string
+ Def *Node
+ Label *Label
+ Block int32
+ Lastlineno int32
+ Origpkg *Pkg
+ Lsym *obj.LSym
+}
+
+type Node struct {
+ Left *Node
+ Right *Node
+ Ntest *Node
+ Nincr *Node
+ Ninit *NodeList
+ Nbody *NodeList
+ Nelse *NodeList
+ List *NodeList
+ Rlist *NodeList
+ Op uint8
+ Nointerface bool
+ Ullman uint8
+ Addable uint8
+ Trecur uint8
+ Etype uint8
+ Bounded uint8
+ Class uint8
+ Method uint8
+ Embedded uint8
+ Colas uint8
+ Diag uint8
+ Noescape bool
+ Nosplit bool
+ Builtin uint8
+ Nowritebarrier bool
+ Walkdef uint8
+ Typecheck uint8
+ Local uint8
+ Dodata uint8
+ Initorder uint8
+ Used uint8
+ Isddd uint8
+ Readonly uint8
+ Implicit uint8
+ Addrtaken uint8
+ Assigned uint8
+ Captured uint8
+ Byval uint8
+ Dupok uint8
+ Wrapper uint8
+ Reslice uint8
+ Likely int8
+ Hasbreak uint8
+ Needzero uint8
+ Needctxt uint8
+ Esc uint
+ Funcdepth int
+ Type *Type
+ Orig *Node
+ Nname *Node
+ Shortname *Node
+ Enter *NodeList
+ Exit *NodeList
+ Cvars *NodeList
+ Dcl *NodeList
+ Inl *NodeList
+ Inldcl *NodeList
+ Val Val
+ Ntype *Node
+ Defn *Node
+ Pack *Node
+ Curfn *Node
+ Paramfld *Type
+ Decldepth int
+ Heapaddr *Node
+ Outerexpr *Node
+ Stackparam *Node
+ Alloc *Node
+ Outer *Node
+ Closure *Node
+ Top int
+ Inlvar *Node
+ Pkg *Pkg
+ Initplan *InitPlan
+ Escflowsrc *NodeList
+ Escretval *NodeList
+ Escloopdepth int
+ Sym *Sym
+ Vargen int32
+ Lineno int32
+ Endlineno int32
+ Xoffset int64
+ Stkdelta int64
+ Ostk int32
+ Iota int32
+ Walkgen uint32
+ Esclevel int32
+ Opt interface{}
+}
+
+type NodeList struct {
+ N *Node
+ Next *NodeList
+ End *NodeList
+}
+
+type Type struct {
+ Etype uint8
+ Nointerface uint8
+ Noalg uint8
+ Chan uint8
+ Trecur uint8
+ Printed uint8
+ Embedded uint8
+ Siggen uint8
+ Funarg uint8
+ Copyany uint8
+ Local uint8
+ Deferwidth uint8
+ Broke uint8
+ Isddd uint8
+ Align uint8
+ Haspointers uint8
+ Nod *Node
+ Orig *Type
+ Lineno int
+ Thistuple int
+ Outtuple int
+ Intuple int
+ Outnamed uint8
+ Method *Type
+ Xmethod *Type
+ Sym *Sym
+ Vargen int32
+ Nname *Node
+ Argwid int64
+ Type *Type
+ Width int64
+ Down *Type
+ Outer *Type
+ Note *Strlit
+ Bound int64
+ Bucket *Type
+ Hmap *Type
+ Hiter *Type
+ Map *Type
+ Maplineno int32
+ Embedlineno int32
+ Copyto *NodeList
+ Lastfn *Node
+}
+
+type Label struct {
+ Used uint8
+ Sym *Sym
+ Def *Node
+ Use *NodeList
+ Link *Label
+ Gotopc *obj.Prog
+ Labelpc *obj.Prog
+ Breakpc *obj.Prog
+ Continpc *obj.Prog
+}
+
+type InitEntry struct {
+ Xoffset int64
+ Key *Node
+ Expr *Node
+}
+
+type InitPlan struct {
+ Lit int64
+ Zero int64
+ Expr int64
+ E []InitEntry
+}
+
+const (
+ EscUnknown = iota
+ EscHeap
+ EscScope
+ EscNone
+ EscReturn
+ EscNever
+ EscBits = 3
+ EscMask = (1 << EscBits) - 1
+ EscContentEscapes = 1 << EscBits
+ EscReturnBits = EscBits + 1
+)
+
+/*
+ * Every node has a walkgen field.
+ * If you want to do a traversal of a node graph that
+ * might contain duplicates and want to avoid
+ * visiting the same nodes twice, increment walkgen
+ * before starting. Then before processing a node, do
+ *
+ * if(n->walkgen == walkgen)
+ * return;
+ * n->walkgen = walkgen;
+ *
+ * Such a walk cannot call another such walk recursively,
+ * because of the use of the global walkgen.
+ */
+var walkgen uint32
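+
+// Illustration (not part of the original source): in Go syntax the guard
+// described above is
+//
+//	walkgen++
+//	// ... and then in the per-node visit:
+//	if n.Walkgen == walkgen {
+//		return
+//	}
+//	n.Walkgen = walkgen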
+
+const (
+ SymExport = 1 << 0
+ SymPackage = 1 << 1
+ SymExported = 1 << 2
+ SymUniq = 1 << 3
+ SymSiggen = 1 << 4
+ SymAsm = 1 << 5
+ SymAlgGen = 1 << 6
+)
+
+var dclstack *Sym
+
+type Iter struct {
+ Done int
+ Tfunc *Type
+ T *Type
+ An **Node
+ N *Node
+}
+
+// Node ops.
+const (
+ OXXX = iota
+ ONAME
+ ONONAME
+ OTYPE
+ OPACK
+ OLITERAL
+ OADD
+ OSUB
+ OOR
+ OXOR
+ OADDSTR
+ OADDR
+ OANDAND
+ OAPPEND
+ OARRAYBYTESTR
+ OARRAYBYTESTRTMP
+ OARRAYRUNESTR
+ OSTRARRAYBYTE
+ OSTRARRAYBYTETMP
+ OSTRARRAYRUNE
+ OAS
+ OAS2
+ OAS2FUNC
+ OAS2RECV
+ OAS2MAPR
+ OAS2DOTTYPE
+ OASOP
+ OCALL
+ OCALLFUNC
+ OCALLMETH
+ OCALLINTER
+ OCALLPART
+ OCAP
+ OCLOSE
+ OCLOSURE
+ OCMPIFACE
+ OCMPSTR
+ OCOMPLIT
+ OMAPLIT
+ OSTRUCTLIT
+ OARRAYLIT
+ OPTRLIT
+ OCONV
+ OCONVIFACE
+ OCONVNOP
+ OCOPY
+ ODCL
+ ODCLFUNC
+ ODCLFIELD
+ ODCLCONST
+ ODCLTYPE
+ ODELETE
+ ODOT
+ ODOTPTR
+ ODOTMETH
+ ODOTINTER
+ OXDOT
+ ODOTTYPE
+ ODOTTYPE2
+ OEQ
+ ONE
+ OLT
+ OLE
+ OGE
+ OGT
+ OIND
+ OINDEX
+ OINDEXMAP
+ OKEY
+ OPARAM
+ OLEN
+ OMAKE
+ OMAKECHAN
+ OMAKEMAP
+ OMAKESLICE
+ OMUL
+ ODIV
+ OMOD
+ OLSH
+ ORSH
+ OAND
+ OANDNOT
+ ONEW
+ ONOT
+ OCOM
+ OPLUS
+ OMINUS
+ OOROR
+ OPANIC
+ OPRINT
+ OPRINTN
+ OPAREN
+ OSEND
+ OSLICE
+ OSLICEARR
+ OSLICESTR
+ OSLICE3
+ OSLICE3ARR
+ ORECOVER
+ ORECV
+ ORUNESTR
+ OSELRECV
+ OSELRECV2
+ OIOTA
+ OREAL
+ OIMAG
+ OCOMPLEX
+ OBLOCK
+ OBREAK
+ OCASE
+ OXCASE
+ OCONTINUE
+ ODEFER
+ OEMPTY
+ OFALL
+ OXFALL
+ OFOR
+ OGOTO
+ OIF
+ OLABEL
+ OPROC
+ ORANGE
+ ORETURN
+ OSELECT
+ OSWITCH
+ OTYPESW
+ OTCHAN
+ OTMAP
+ OTSTRUCT
+ OTINTER
+ OTFUNC
+ OTARRAY
+ ODDD
+ ODDDARG
+ OINLCALL
+ OEFACE
+ OITAB
+ OSPTR
+ OCLOSUREVAR
+ OCFUNC
+ OCHECKNIL
+ OVARKILL
+ OREGISTER
+ OINDREG
+ OCMP
+ ODEC
+ OINC
+ OEXTEND
+ OHMUL
+ OLROT
+ ORROTC
+ ORETJMP
+ OEND
+)
+
+const (
+ Txxx = iota
+ TINT8
+ TUINT8
+ TINT16
+ TUINT16
+ TINT32
+ TUINT32
+ TINT64
+ TUINT64
+ TINT
+ TUINT
+ TUINTPTR
+ TCOMPLEX64
+ TCOMPLEX128
+ TFLOAT32
+ TFLOAT64
+ TBOOL
+ TPTR32
+ TPTR64
+ TFUNC
+ TARRAY
+ T_old_DARRAY
+ TSTRUCT
+ TCHAN
+ TMAP
+ TINTER
+ TFORW
+ TFIELD
+ TANY
+ TSTRING
+ TUNSAFEPTR
+ TIDEAL
+ TNIL
+ TBLANK
+ TFUNCARGS
+ TCHANARGS
+ TINTERMETH
+ NTYPE
+)
+
+const (
+ CTxxx = iota
+ CTINT
+ CTRUNE
+ CTFLT
+ CTCPLX
+ CTSTR
+ CTBOOL
+ CTNIL
+)
+
+const (
+ Cxxx = 0
+ Crecv = 1 << 0
+ Csend = 1 << 1
+ Cboth = Crecv | Csend
+)
+
+// declaration context
+const (
+ Pxxx = iota
+ PEXTERN
+ PAUTO
+ PPARAM
+ PPARAMOUT
+ PPARAMREF
+ PFUNC
+ PDISCARD
+ PHEAP = 1 << 7
+)
+
+const (
+ Etop = 1 << 1
+ Erv = 1 << 2
+ Etype = 1 << 3
+ Ecall = 1 << 4
+ Efnstruct = 1 << 5
+ Eiota = 1 << 6
+ Easgn = 1 << 7
+ Eindir = 1 << 8
+ Eaddr = 1 << 9
+ Eproc = 1 << 10
+ Ecomplit = 1 << 11
+)
+
+const (
+ BITS = 3
+ NVAR = BITS * 64
+)
+
+type Bits struct {
+ b [BITS]uint64
+}
+
+var zbits Bits
+
+type Var struct {
+ offset int64
+ node *Node
+ nextinnode *Var
+ width int
+ id int
+ name int8
+ etype int8
+ addr int8
+}
+
+var var_ [NVAR]Var
+
+type Typedef struct {
+ Name string
+ Etype int
+ Sameas int
+}
+
+type Sig struct {
+ name string
+ pkg *Pkg
+ isym *Sym
+ tsym *Sym
+ type_ *Type
+ mtype *Type
+ offset int32
+ link *Sig
+}
+
+type Io struct {
+ infile string
+ bin *obj.Biobuf
+ ilineno int32
+ nlsemi int
+ eofnl int
+ last int
+ peekc int
+ peekc1 int
+ cp string
+ importsafe bool
+}
+
+type Dlist struct {
+ field *Type
+}
+
+type Idir struct {
+ link *Idir
+ dir string
+}
+
+/*
+ * argument passing to/from
+ * smagic and umagic
+ */
+type Magic struct {
+ W int
+ S int
+ Bad int
+ Sd int64
+ Sm int64
+ Ud uint64
+ Um uint64
+ Ua int
+}
+
+/*
+ * note this is the runtime representation
+ * of the compiler's arrays.
+ *
+ * typedef struct
+ * { // must not move anything
+ * uchar array[8]; // pointer to data
+ * uchar nel[4]; // number of elements
+ * uchar cap[4]; // allocated number of elements
+ * } Array;
+ */
+var Array_array int // runtime offsetof(Array,array) - same for String
+
+var Array_nel int // runtime offsetof(Array,nel) - same for String
+
+var Array_cap int // runtime offsetof(Array,cap)
+
+var sizeof_Array int // runtime sizeof(Array)
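+
+// Illustration (not part of the original source): the offsets above
+// describe a header that in a hypothetical Go spelling would be
+//
+//	type sliceHeader struct {
+//		array uintptr // Array_array: pointer to data
+//		nel   int32   // Array_nel: number of elements
+//		cap   int32   // Array_cap: allocated number of elements
+//	}
+//
+// with the first two fields shared with the string header.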
+
+/*
+ * note this is the runtime representation
+ * of the compiler's strings.
+ *
+ * typedef struct
+ * { // must not move anything
+ * uchar array[8]; // pointer to data
+ * uchar nel[4]; // number of elements
+ * } String;
+ */
+var sizeof_String int // runtime sizeof(String)
+
+var dotlist [10]Dlist // size is max depth of embeddeds
+
+var curio Io
+
+var pushedio Io
+
+var lexlineno int32
+
+var lineno int32
+
+var prevlineno int32
+
+var pragcgobuf string
+
+var infile string
+
+var outfile string
+
+var bout *obj.Biobuf
+
+var nerrors int
+
+var nsavederrors int
+
+var nsyntaxerrors int
+
+var decldepth int
+
+var safemode int
+
+var nolocalimports int
+
+var namebuf string
+
+var lexbuf bytes.Buffer
+var strbuf bytes.Buffer
+
+func DBG(...interface{}) {}
+
+var litbuf string
+
+var Debug [256]int
+
+var debugstr string
+
+var Debug_checknil int
+
+var hash [NHASH]*Sym
+
+var importmyname *Sym // my name for package
+
+var localpkg *Pkg // package being compiled
+
+var importpkg *Pkg // package being imported
+
+var structpkg *Pkg // package that declared struct, during import
+
+var builtinpkg *Pkg // fake package for builtins
+
+var gostringpkg *Pkg // fake pkg for Go strings
+
+var itabpkg *Pkg // fake pkg for itab cache
+
+var Runtimepkg *Pkg // package runtime
+
+var racepkg *Pkg // package runtime/race
+
+var stringpkg *Pkg // fake package for C strings
+
+var typepkg *Pkg // fake package for runtime type info (headers)
+
+var typelinkpkg *Pkg // fake package for runtime type info (data)
+
+var weaktypepkg *Pkg // weak references to runtime type info
+
+var unsafepkg *Pkg // package unsafe
+
+var trackpkg *Pkg // fake package for field tracking
+
+var rawpkg *Pkg // fake package for raw symbol names
+
+var phash [128]*Pkg
+
+var Tptr int // either TPTR32 or TPTR64
+
+var myimportpath string
+
+var idirs *Idir
+
+var localimport string
+
+var asmhdr string
+
+var Types [NTYPE]*Type
+
+var idealstring *Type
+
+var idealbool *Type
+
+var bytetype *Type
+
+var runetype *Type
+
+var errortype *Type
+
+var Simtype [NTYPE]uint8
+
+var Isptr [NTYPE]uint8
+
+var isforw [NTYPE]uint8
+
+var Isint [NTYPE]uint8
+
+var Isfloat [NTYPE]uint8
+
+var Iscomplex [NTYPE]uint8
+
+var Issigned [NTYPE]uint8
+
+var issimple [NTYPE]uint8
+
+var okforeq [NTYPE]uint8
+
+var okforadd [NTYPE]uint8
+
+var okforand [NTYPE]uint8
+
+var okfornone [NTYPE]uint8
+
+var okforcmp [NTYPE]uint8
+
+var okforbool [NTYPE]uint8
+
+var okforcap [NTYPE]uint8
+
+var okforlen [NTYPE]uint8
+
+var okforarith [NTYPE]uint8
+
+var okforconst [NTYPE]uint8
+
+var okfor [OEND][]byte
+
+var iscmp [OEND]uint8
+
+var Minintval [NTYPE]*Mpint
+
+var Maxintval [NTYPE]*Mpint
+
+var minfltval [NTYPE]*Mpflt
+
+var maxfltval [NTYPE]*Mpflt
+
+var xtop *NodeList
+
+var externdcl *NodeList
+
+var exportlist *NodeList
+
+var importlist *NodeList // imported functions and methods with inlinable bodies
+
+var funcsyms *NodeList
+
+var dclcontext int // PEXTERN/PAUTO
+
+var incannedimport int
+
+var statuniqgen int // name generator for static temps
+
+var loophack int
+
+var iota_ int32
+
+var lastconst *NodeList
+
+var lasttype *Node
+
+var Maxarg int64
+
+var Stksize int64 // stack size for current frame
+
+var stkptrsize int64 // prefix of stack containing pointers
+
+var blockgen int32 // max block number
+
+var block int32 // current block number
+
+var Hasdefer int // flag that curfn has defer statement
+
+var Curfn *Node
+
+var Widthptr int
+
+var Widthint int
+
+var Widthreg int
+
+var typesw *Node
+
+var nblank *Node
+
+var Use_sse int
+
+var thunk int32
+
+var Funcdepth int
+
+var typecheckok int
+
+var compiling_runtime int
+
+var compiling_wrappers int
+
+var inl_nonlocal int
+
+var use_writebarrier int
+
+var pure_go int
+
+var flag_installsuffix string
+
+var flag_race int
+
+var flag_largemodel int
+
+var noescape bool
+
+var nosplit bool
+
+var nowritebarrier bool
+
+var debuglive int
+
+var Ctxt *obj.Link
+
+var nointerface bool
+
+var writearchive int
+
+var bstdout obj.Biobuf
+
+var Nacl bool
+
+/*
+ * thearch-specific ggen.c/gsubr.c/gobj.c/pgen.c/plive.c
+ */
+var continpc *obj.Prog
+
+var breakpc *obj.Prog
+
+var Pc *obj.Prog
+
+var firstpc *obj.Prog
+
+var nodfp *Node
+
+var Disable_checknil int
+
+var zerosize int64
+
+/*
+ * racewalk.c
+ */
+
+/*
+ * flow.c
+ */
+type Flow struct {
+ Prog *obj.Prog
+ P1 *Flow
+ P2 *Flow
+ P2link *Flow
+ S1 *Flow
+ S2 *Flow
+ Link *Flow
+ Active int32
+ Id int32
+ Rpo int32
+ Loop uint16
+ Refset uint8
+ Data interface{}
+}
+
+type Graph struct {
+ Start *Flow
+ Num int
+ Rpo []*Flow
+}
+
+/*
+ * interface to back end
+ */
+type ProgInfo struct {
+ Flags uint32
+ Reguse uint64
+ Regset uint64
+ Regindex uint64
+}
+
+const (
+ Pseudo = 1 << 1
+ OK = 1 << 2
+ SizeB = 1 << 3
+ SizeW = 1 << 4
+ SizeL = 1 << 5
+ SizeQ = 1 << 6
+ SizeF = 1 << 7
+ SizeD = 1 << 8
+ LeftAddr = 1 << 9
+ LeftRead = 1 << 10
+ LeftWrite = 1 << 11
+ RegRead = 1 << 12
+ CanRegRead = 1 << 13
+ RightAddr = 1 << 14
+ RightRead = 1 << 15
+ RightWrite = 1 << 16
+ Move = 1 << 17
+ Conv = 1 << 18
+ Cjmp = 1 << 19
+ Break = 1 << 20
+ Call = 1 << 21
+ Jump = 1 << 22
+ Skip = 1 << 23
+ SetCarry = 1 << 24
+ UseCarry = 1 << 25
+ KillCarry = 1 << 26
+ ShiftCX = 1 << 27
+ ImulAXDX = 1 << 28
+ PostInc = 1 << 29
+)
+
+type Arch struct {
+ ByteOrder binary.ByteOrder
+ Thechar int
+ Thestring string
+ Thelinkarch *obj.LinkArch
+ Typedefs []Typedef
+ REGSP int
+ REGCTXT int
+ MAXWIDTH int64
+ Anyregalloc func() int
+ Betypeinit func()
+ Bgen func(*Node, bool, int, *obj.Prog)
+ Cgen func(*Node, *Node)
+ Cgen_call func(*Node, int)
+ Cgen_callinter func(*Node, *Node, int)
+ Cgen_ret func(*Node)
+ Clearfat func(*Node)
+ Defframe func(*obj.Prog)
+ Excise func(*Flow)
+ Expandchecks func(*obj.Prog)
+ Gclean func()
+ Ginit func()
+ Gins func(int, *Node, *Node) *obj.Prog
+ Ginscall func(*Node, int)
+ Igen func(*Node, *Node, *Node)
+ Linkarchinit func()
+ Peep func(*obj.Prog)
+ Proginfo func(*ProgInfo, *obj.Prog)
+ Regalloc func(*Node, *Type, *Node)
+ Regfree func(*Node)
+ Regtyp func(*obj.Addr) int
+ Sameaddr func(*obj.Addr, *obj.Addr) int
+ Smallindir func(*obj.Addr, *obj.Addr) int
+ Stackaddr func(*obj.Addr) int
+ Excludedregs func() uint64
+ RtoB func(int) uint64
+ FtoB func(int) uint64
+ BtoR func(uint64) int
+ BtoF func(uint64) int
+ Optoas func(int, *Type) int
+ Doregbits func(int) uint64
+ Regnames func(*int) []string
+}
+
+var pcloc int32
+
+var Thearch Arch
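+
+// Illustration (not part of the original source): an architecture back end
+// is expected to fill in Thearch before compilation begins, along the
+// lines of
+//
+//	Thearch.Thechar = '6'
+//	Thearch.Thestring = "amd64"
+//	Thearch.Cgen = cgen // that back end's expression generator
+//	// ... and so on for the remaining hooks.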
+
+var Newproc *Node
+
+var Deferproc *Node
+
+var Deferreturn *Node
+
+var Panicindex *Node
+
+var panicslice *Node
+
+var throwreturn *Node
diff --git a/src/cmd/internal/gc/go.y b/src/cmd/internal/gc/go.y
new file mode 100644
index 0000000000..90384499fd
--- /dev/null
+++ b/src/cmd/internal/gc/go.y
@@ -0,0 +1,2252 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Go language grammar.
+ *
+ * The Go semicolon rules are:
+ *
+ * 1. all statements and declarations are terminated by semicolons.
+ * 2. semicolons can be omitted before a closing ) or }.
+ * 3. semicolons are inserted by the lexer before a newline
+ * following a specific list of tokens.
+ *
+ * Rules #1 and #2 are accomplished by writing the lists as
+ * semicolon-separated lists with an optional trailing semicolon.
+ * Rule #3 is implemented in yylex.
+ */
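+
+/*
+ * For example (illustrative, not part of the original grammar): after
+ *
+ *	x := f(a)
+ *
+ * the lexer inserts a ';' before the newline, because ')' is on the
+ * list of tokens in rule #3.
+ */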
+
+%{
+package gc
+
+import (
+ "strings"
+)
+%}
+%union {
+ node *Node
+ list *NodeList
+ typ *Type
+ sym *Sym
+ val Val
+ i int
+}
+
+// |sed 's/.* //' |9 fmt -l1 |sort |9 fmt -l50 | sed 's/^/%xxx /'
+
+%token <val> LLITERAL
+%token <i> LASOP LCOLAS
+%token <sym> LBREAK LCASE LCHAN LCONST LCONTINUE LDDD
+%token <sym> LDEFAULT LDEFER LELSE LFALL LFOR LFUNC LGO LGOTO
+%token <sym> LIF LIMPORT LINTERFACE LMAP LNAME
+%token <sym> LPACKAGE LRANGE LRETURN LSELECT LSTRUCT LSWITCH
+%token <sym> LTYPE LVAR
+
+%token LANDAND LANDNOT LBODY LCOMM LDEC LEQ LGE LGT
+%token LIGNORE LINC LLE LLSH LLT LNE LOROR LRSH
+
+%type <i> lbrace import_here
+%type <sym> sym packname
+%type <val> oliteral
+
+%type <node> stmt ntype
+%type <node> arg_type
+%type <node> case caseblock
+%type <node> compound_stmt dotname embed expr complitexpr bare_complitexpr
+%type <node> expr_or_type
+%type <node> fndcl hidden_fndcl fnliteral
+%type <node> for_body for_header for_stmt if_header if_stmt non_dcl_stmt
+%type <node> interfacedcl keyval labelname name
+%type <node> name_or_type non_expr_type
+%type <node> new_name dcl_name oexpr typedclname
+%type <node> onew_name
+%type <node> osimple_stmt pexpr pexpr_no_paren
+%type <node> pseudocall range_stmt select_stmt
+%type <node> simple_stmt
+%type <node> switch_stmt uexpr
+%type <node> xfndcl typedcl start_complit
+
+%type <list> xdcl fnbody fnres loop_body dcl_name_list
+%type <list> new_name_list expr_list keyval_list braced_keyval_list expr_or_type_list xdcl_list
+%type <list> oexpr_list caseblock_list elseif elseif_list else stmt_list oarg_type_list_ocomma arg_type_list
+%type <list> interfacedcl_list vardcl vardcl_list structdcl structdcl_list
+%type <list> common_dcl constdcl constdcl1 constdcl_list typedcl_list
+
+%type <node> convtype comptype dotdotdot
+%type <node> indcl interfacetype structtype ptrtype
+%type <node> recvchantype non_recvchantype othertype fnret_type fntype
+
+%type <sym> hidden_importsym hidden_pkg_importsym
+
+%type <node> hidden_constant hidden_literal hidden_funarg
+%type <node> hidden_interfacedcl hidden_structdcl
+
+%type <list> hidden_funres
+%type <list> ohidden_funres
+%type <list> hidden_funarg_list ohidden_funarg_list
+%type <list> hidden_interfacedcl_list ohidden_interfacedcl_list
+%type <list> hidden_structdcl_list ohidden_structdcl_list
+
+%type <typ> hidden_type hidden_type_misc hidden_pkgtype
+%type <typ> hidden_type_func
+%type <typ> hidden_type_recv_chan hidden_type_non_recv_chan
+
+%left LCOMM /* outside the usual hierarchy; here for good error messages */
+
+%left LOROR
+%left LANDAND
+%left LEQ LNE LLE LGE LLT LGT
+%left '+' '-' '|' '^'
+%left '*' '/' '%' '&' LLSH LRSH LANDNOT
+
+/*
+ * manual override of shift/reduce conflicts.
+ * the general form is that we assign a precedence
+ * to the token being shifted and then introduce
+ * NotToken with lower precedence or PreferToToken with higher
+ * and annotate the reducing rule accordingly.
+ */
+%left NotPackage
+%left LPACKAGE
+
+%left NotParen
+%left '('
+
+%left ')'
+%left PreferToRightParen
+
+// TODO(rsc): Add %error-verbose
+
+%%
+file:
+ loadsys
+ package
+ imports
+ xdcl_list
+ {
+ xtop = concat(xtop, $4);
+ }
+
+package:
+ %prec NotPackage
+ {
+ prevlineno = lineno;
+ Yyerror("package statement must be first");
+ errorexit();
+ }
+| LPACKAGE sym ';'
+ {
+ mkpackage($2.Name);
+ }
+
+/*
+ * this loads the definitions for the low-level runtime functions,
+ * so that the compiler can generate calls to them,
+ * but does not make the name "runtime" visible as a package.
+ */
+loadsys:
+ {
+ importpkg = Runtimepkg;
+
+ if Debug['A'] != 0 {
+ cannedimports("runtime.Builtin", "package runtime\n\n$$\n\n");
+ } else {
+ cannedimports("runtime.Builtin", runtimeimport);
+ }
+ curio.importsafe = true
+ }
+ import_package
+ import_there
+ {
+ importpkg = nil;
+ }
+
+imports:
+| imports import ';'
+
+import:
+ LIMPORT import_stmt
+| LIMPORT '(' import_stmt_list osemi ')'
+| LIMPORT '(' ')'
+
+import_stmt:
+ import_here import_package import_there
+ {
+ ipkg := importpkg;
+ my := importmyname;
+ importpkg = nil;
+ importmyname = nil;
+
+ if my == nil {
+ my = Lookup(ipkg.Name);
+ }
+
+ pack := Nod(OPACK, nil, nil);
+ pack.Sym = my;
+ pack.Pkg = ipkg;
+ pack.Lineno = int32($1);
+
+ if strings.HasPrefix(my.Name, ".") {
+ importdot(ipkg, pack);
+ break;
+ }
+ if my.Name == "init" {
+ Yyerror("cannot import package as init - init must be a func");
+ break;
+ }
+ if my.Name == "_" {
+ break;
+ }
+ if my.Def != nil {
+ lineno = int32($1);
+ redeclare(my, "as imported package name");
+ }
+ my.Def = pack;
+ my.Lastlineno = int32($1);
+ my.Block = 1; // at top level
+ }
+| import_here import_there
+ {
+ // When an invalid import path is passed to importfile,
+ // it calls Yyerror and then sets up a fake import with
+ // no package statement. This allows us to test more
+ // than one invalid import statement in a single file.
+ if nerrors == 0 {
+ Fatal("phase error in import");
+ }
+ }
+
+import_stmt_list:
+ import_stmt
+| import_stmt_list ';' import_stmt
+
+import_here:
+ LLITERAL
+ {
+ // import with original name
+ $$ = parserline();
+ importmyname = nil;
+ importfile(&$1, $$);
+ }
+| sym LLITERAL
+ {
+ // import with given name
+ $$ = parserline();
+ importmyname = $1;
+ importfile(&$2, $$);
+ }
+| '.' LLITERAL
+ {
+ // import into my name space
+ $$ = parserline();
+ importmyname = Lookup(".");
+ importfile(&$2, $$);
+ }
+
+import_package:
+ LPACKAGE LNAME import_safety ';'
+ {
+ if importpkg.Name == "" {
+ importpkg.Name = $2.Name;
+ Pkglookup($2.Name, nil).Npkg++;
+ } else if importpkg.Name != $2.Name {
+ Yyerror("conflicting names %s and %s for package \"%v\"", importpkg.Name, $2.Name, Zconv(importpkg.Path, 0));
+ }
+ importpkg.Direct = 1;
+ importpkg.Safe = curio.importsafe
+
+ if safemode != 0 && !curio.importsafe {
+ Yyerror("cannot import unsafe package \"%v\"", Zconv(importpkg.Path, 0));
+ }
+ }
+
+import_safety:
+| LNAME
+ {
+ if $1.Name == "safe" {
+ curio.importsafe = true
+ }
+ }
+
+import_there:
+ {
+ defercheckwidth();
+ }
+ hidden_import_list '$' '$'
+ {
+ resumecheckwidth();
+ unimportfile();
+ }
+
+/*
+ * declarations
+ */
+xdcl:
+ {
+ Yyerror("empty top-level declaration");
+ $$ = nil;
+ }
+| common_dcl
+| xfndcl
+ {
+ $$ = list1($1);
+ }
+| non_dcl_stmt
+ {
+ Yyerror("non-declaration statement outside function body");
+ $$ = nil;
+ }
+| error
+ {
+ $$ = nil;
+ }
+
+common_dcl:
+ LVAR vardcl
+ {
+ $$ = $2;
+ }
+| LVAR '(' vardcl_list osemi ')'
+ {
+ $$ = $3;
+ }
+| LVAR '(' ')'
+ {
+ $$ = nil;
+ }
+| lconst constdcl
+ {
+ $$ = $2;
+ iota_ = -100000;
+ lastconst = nil;
+ }
+| lconst '(' constdcl osemi ')'
+ {
+ $$ = $3;
+ iota_ = -100000;
+ lastconst = nil;
+ }
+| lconst '(' constdcl ';' constdcl_list osemi ')'
+ {
+ $$ = concat($3, $5);
+ iota_ = -100000;
+ lastconst = nil;
+ }
+| lconst '(' ')'
+ {
+ $$ = nil;
+ iota_ = -100000;
+ }
+| LTYPE typedcl
+ {
+ $$ = list1($2);
+ }
+| LTYPE '(' typedcl_list osemi ')'
+ {
+ $$ = $3;
+ }
+| LTYPE '(' ')'
+ {
+ $$ = nil;
+ }
+
+lconst:
+ LCONST
+ {
+ iota_ = 0;
+ }
+
+vardcl:
+ dcl_name_list ntype
+ {
+ $$ = variter($1, $2, nil);
+ }
+| dcl_name_list ntype '=' expr_list
+ {
+ $$ = variter($1, $2, $4);
+ }
+| dcl_name_list '=' expr_list
+ {
+ $$ = variter($1, nil, $3);
+ }
+
+constdcl:
+ dcl_name_list ntype '=' expr_list
+ {
+ $$ = constiter($1, $2, $4);
+ }
+| dcl_name_list '=' expr_list
+ {
+ $$ = constiter($1, nil, $3);
+ }
+
+constdcl1:
+ constdcl
+| dcl_name_list ntype
+ {
+ $$ = constiter($1, $2, nil);
+ }
+| dcl_name_list
+ {
+ $$ = constiter($1, nil, nil);
+ }
+
+typedclname:
+ sym
+ {
+ // different from dclname because the name
+ // becomes visible right here, not at the end
+ // of the declaration.
+ $$ = typedcl0($1);
+ }
+
+typedcl:
+ typedclname ntype
+ {
+ $$ = typedcl1($1, $2, 1);
+ }
+
+simple_stmt:
+ expr
+ {
+ $$ = $1;
+
+ // These nodes do not carry line numbers.
+ // Since a bare name used as an expression is an error,
+ // introduce a wrapper node to give the correct line.
+ switch($$.Op) {
+ case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+ $$ = Nod(OPAREN, $$, nil);
+ $$.Implicit = 1;
+ break;
+ }
+ }
+| expr LASOP expr
+ {
+ $$ = Nod(OASOP, $1, $3);
+ $$.Etype = uint8($2); // rathole to pass opcode
+ }
+| expr_list '=' expr_list
+ {
+ if $1.Next == nil && $3.Next == nil {
+ // simple
+ $$ = Nod(OAS, $1.N, $3.N);
+ break;
+ }
+ // multiple
+ $$ = Nod(OAS2, nil, nil);
+ $$.List = $1;
+ $$.Rlist = $3;
+ }
+| expr_list LCOLAS expr_list
+ {
+ if $3.N.Op == OTYPESW {
+ $$ = Nod(OTYPESW, nil, $3.N.Right);
+ if $3.Next != nil {
+ Yyerror("expr.(type) must be alone in list");
+ }
+ if $1.Next != nil {
+ Yyerror("argument count mismatch: %d = %d", count($1), 1);
+ } else if ($1.N.Op != ONAME && $1.N.Op != OTYPE && $1.N.Op != ONONAME) || isblank($1.N) {
+ Yyerror("invalid variable name %nil in type switch", $1.N);
+ } else {
+ $$.Left = dclname($1.N.Sym);
+ } // it's a colas, so must not re-use an oldname.
+ break;
+ }
+ $$ = colas($1, $3, int32($2));
+ }
+| expr LINC
+ {
+ $$ = Nod(OASOP, $1, Nodintconst(1));
+ $$.Implicit = 1;
+ $$.Etype = OADD;
+ }
+| expr LDEC
+ {
+ $$ = Nod(OASOP, $1, Nodintconst(1));
+ $$.Implicit = 1;
+ $$.Etype = OSUB;
+ }
+
+case:
+ LCASE expr_or_type_list ':'
+ {
+ var n, nn *Node
+
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl();
+ $$ = Nod(OXCASE, nil, nil);
+ $$.List = $2;
+ if typesw != nil && typesw.Right != nil {
+ n = typesw.Right.Left
+ if n != nil {
+ // type switch - declare variable
+ nn = newname(n.Sym);
+ declare(nn, dclcontext);
+ $$.Nname = nn;
+
+ // keep track of the instances for reporting unused
+ nn.Defn = typesw.Right;
+ }
+ }
+ }
+| LCASE expr_or_type_list '=' expr ':'
+ {
+ var n *Node
+
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl();
+ $$ = Nod(OXCASE, nil, nil);
+ if $2.Next == nil {
+ n = Nod(OAS, $2.N, $4);
+ } else {
+ n = Nod(OAS2, nil, nil);
+ n.List = $2;
+ n.Rlist = list1($4);
+ }
+ $$.List = list1(n);
+ }
+| LCASE expr_or_type_list LCOLAS expr ':'
+ {
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl();
+ $$ = Nod(OXCASE, nil, nil);
+ $$.List = list1(colas($2, list1($4), int32($3)));
+ }
+| LDEFAULT ':'
+ {
+ var n, nn *Node
+
+ markdcl();
+ $$ = Nod(OXCASE, nil, nil);
+ if typesw != nil && typesw.Right != nil {
+ n = typesw.Right.Left
+ if n != nil {
+ // type switch - declare variable
+ nn = newname(n.Sym);
+ declare(nn, dclcontext);
+ $$.Nname = nn;
+
+ // keep track of the instances for reporting unused
+ nn.Defn = typesw.Right;
+ }
+ }
+ }
+
+compound_stmt:
+ '{'
+ {
+ markdcl();
+ }
+ stmt_list '}'
+ {
+ if $3 == nil {
+ $$ = Nod(OEMPTY, nil, nil);
+ } else {
+ $$ = liststmt($3);
+ }
+ popdcl();
+ }
+
+caseblock:
+ case
+ {
+ // If the last token read by the lexer was consumed
+ // as part of the case, clear it (parser has cleared yychar).
+ // If the last token read by the lexer was the lookahead
+ // leave it alone (parser has it cached in yychar).
+ // This is so that the stmt_list action doesn't look at
+ // the case tokens if the stmt_list is empty.
+ yylast = yychar;
+ $1.Xoffset = int64(block);
+ }
+ stmt_list
+ {
+ // This is the only place in the language where a statement
+ // list is not allowed to drop the final semicolon, because
+ // it's the only place where a statement list is not followed
+ // by a closing brace. Handle the error for pedantry.
+
+ // Find the final token of the statement list.
+ // yylast is lookahead; yyprev is last of stmt_list
+ last := yyprev;
+
+ if last > 0 && last != ';' && yychar != '}' {
+ Yyerror("missing statement after label");
+ }
+ $$ = $1;
+ $$.Nbody = $3;
+ popdcl();
+ }
+
+caseblock_list:
+ {
+ $$ = nil;
+ }
+| caseblock_list caseblock
+ {
+ $$ = list($1, $2);
+ }
+
+loop_body:
+ LBODY
+ {
+ markdcl();
+ }
+ stmt_list '}'
+ {
+ $$ = $3;
+ popdcl();
+ }
+
+range_stmt:
+ expr_list '=' LRANGE expr
+ {
+ $$ = Nod(ORANGE, nil, $4);
+ $$.List = $1;
+ $$.Etype = 0; // := flag
+ }
+| expr_list LCOLAS LRANGE expr
+ {
+ $$ = Nod(ORANGE, nil, $4);
+ $$.List = $1;
+ $$.Colas = 1;
+ colasdefn($1, $$);
+ }
+| LRANGE expr
+ {
+ $$ = Nod(ORANGE, nil, $2);
+ $$.Etype = 0; // := flag
+ }
+
+for_header:
+ osimple_stmt ';' osimple_stmt ';' osimple_stmt
+ {
+ // init ; test ; incr
+ if $5 != nil && $5.Colas != 0 {
+ Yyerror("cannot declare in the for-increment");
+ }
+ $$ = Nod(OFOR, nil, nil);
+ if $1 != nil {
+ $$.Ninit = list1($1);
+ }
+ $$.Ntest = $3;
+ $$.Nincr = $5;
+ }
+| osimple_stmt
+ {
+ // normal test
+ $$ = Nod(OFOR, nil, nil);
+ $$.Ntest = $1;
+ }
+| range_stmt
+
+for_body:
+ for_header loop_body
+ {
+ $$ = $1;
+ $$.Nbody = concat($$.Nbody, $2);
+ }
+
+for_stmt:
+ LFOR
+ {
+ markdcl();
+ }
+ for_body
+ {
+ $$ = $3;
+ popdcl();
+ }
+
+if_header:
+ osimple_stmt
+ {
+ // test
+ $$ = Nod(OIF, nil, nil);
+ $$.Ntest = $1;
+ }
+| osimple_stmt ';' osimple_stmt
+ {
+ // init ; test
+ $$ = Nod(OIF, nil, nil);
+ if $1 != nil {
+ $$.Ninit = list1($1);
+ }
+ $$.Ntest = $3;
+ }
+
+/* IF cond body (ELSE IF cond body)* (ELSE block)? */
+if_stmt:
+ LIF
+ {
+ markdcl();
+ }
+ if_header
+ {
+ if $3.Ntest == nil {
+ Yyerror("missing condition in if statement");
+ }
+ }
+ loop_body
+ {
+ $3.Nbody = $5;
+ }
+ elseif_list else
+ {
+ var n *Node
+ var nn *NodeList
+
+ $$ = $3;
+ n = $3;
+ popdcl();
+ for nn = concat($7, $8); nn != nil; nn = nn.Next {
+ if nn.N.Op == OIF {
+ popdcl();
+ }
+ n.Nelse = list1(nn.N);
+ n = nn.N;
+ }
+ }
+
+elseif:
+ LELSE LIF
+ {
+ markdcl();
+ }
+ if_header loop_body
+ {
+ if $4.Ntest == nil {
+ Yyerror("missing condition in if statement");
+ }
+ $4.Nbody = $5;
+ $$ = list1($4);
+ }
+
+elseif_list:
+ {
+ $$ = nil;
+ }
+| elseif_list elseif
+ {
+ $$ = concat($1, $2);
+ }
+
+else:
+ {
+ $$ = nil;
+ }
+| LELSE compound_stmt
+ {
+ l := &NodeList{N: $2}
+ l.End = l
+ $$ = l;
+ }
+
+switch_stmt:
+ LSWITCH
+ {
+ markdcl();
+ }
+ if_header
+ {
+ var n *Node
+ n = $3.Ntest;
+ if n != nil && n.Op != OTYPESW {
+ n = nil;
+ }
+ typesw = Nod(OXXX, typesw, n);
+ }
+ LBODY caseblock_list '}'
+ {
+ $$ = $3;
+ $$.Op = OSWITCH;
+ $$.List = $6;
+ typesw = typesw.Left;
+ popdcl();
+ }
+
+select_stmt:
+ LSELECT
+ {
+ typesw = Nod(OXXX, typesw, nil);
+ }
+ LBODY caseblock_list '}'
+ {
+ $$ = Nod(OSELECT, nil, nil);
+ $$.Lineno = typesw.Lineno;
+ $$.List = $4;
+ typesw = typesw.Left;
+ }
+
+/*
+ * expressions
+ */
+expr:
+ uexpr
+| expr LOROR expr
+ {
+ $$ = Nod(OOROR, $1, $3);
+ }
+| expr LANDAND expr
+ {
+ $$ = Nod(OANDAND, $1, $3);
+ }
+| expr LEQ expr
+ {
+ $$ = Nod(OEQ, $1, $3);
+ }
+| expr LNE expr
+ {
+ $$ = Nod(ONE, $1, $3);
+ }
+| expr LLT expr
+ {
+ $$ = Nod(OLT, $1, $3);
+ }
+| expr LLE expr
+ {
+ $$ = Nod(OLE, $1, $3);
+ }
+| expr LGE expr
+ {
+ $$ = Nod(OGE, $1, $3);
+ }
+| expr LGT expr
+ {
+ $$ = Nod(OGT, $1, $3);
+ }
+| expr '+' expr
+ {
+ $$ = Nod(OADD, $1, $3);
+ }
+| expr '-' expr
+ {
+ $$ = Nod(OSUB, $1, $3);
+ }
+| expr '|' expr
+ {
+ $$ = Nod(OOR, $1, $3);
+ }
+| expr '^' expr
+ {
+ $$ = Nod(OXOR, $1, $3);
+ }
+| expr '*' expr
+ {
+ $$ = Nod(OMUL, $1, $3);
+ }
+| expr '/' expr
+ {
+ $$ = Nod(ODIV, $1, $3);
+ }
+| expr '%' expr
+ {
+ $$ = Nod(OMOD, $1, $3);
+ }
+| expr '&' expr
+ {
+ $$ = Nod(OAND, $1, $3);
+ }
+| expr LANDNOT expr
+ {
+ $$ = Nod(OANDNOT, $1, $3);
+ }
+| expr LLSH expr
+ {
+ $$ = Nod(OLSH, $1, $3);
+ }
+| expr LRSH expr
+ {
+ $$ = Nod(ORSH, $1, $3);
+ }
+ /* not an expression anymore, but left in so we can give a good error */
+| expr LCOMM expr
+ {
+ $$ = Nod(OSEND, $1, $3);
+ }
+
+uexpr:
+ pexpr
+| '*' uexpr
+ {
+ $$ = Nod(OIND, $2, nil);
+ }
+| '&' uexpr
+ {
+ if $2.Op == OCOMPLIT {
+ // Special case for &T{...}: turn into (*T){...}.
+ $$ = $2;
+ $$.Right = Nod(OIND, $$.Right, nil);
+ $$.Right.Implicit = 1;
+ } else {
+ $$ = Nod(OADDR, $2, nil);
+ }
+ }
+| '+' uexpr
+ {
+ $$ = Nod(OPLUS, $2, nil);
+ }
+| '-' uexpr
+ {
+ $$ = Nod(OMINUS, $2, nil);
+ }
+| '!' uexpr
+ {
+ $$ = Nod(ONOT, $2, nil);
+ }
+| '~' uexpr
+ {
+ Yyerror("the bitwise complement operator is ^");
+ $$ = Nod(OCOM, $2, nil);
+ }
+| '^' uexpr
+ {
+ $$ = Nod(OCOM, $2, nil);
+ }
+| LCOMM uexpr
+ {
+ $$ = Nod(ORECV, $2, nil);
+ }
+
+/*
+ * call-like statements that
+ * can be preceded by 'defer' and 'go'
+ */
+pseudocall:
+ pexpr '(' ')'
+ {
+ $$ = Nod(OCALL, $1, nil);
+ }
+| pexpr '(' expr_or_type_list ocomma ')'
+ {
+ $$ = Nod(OCALL, $1, nil);
+ $$.List = $3;
+ }
+| pexpr '(' expr_or_type_list LDDD ocomma ')'
+ {
+ $$ = Nod(OCALL, $1, nil);
+ $$.List = $3;
+ $$.Isddd = 1;
+ }
+
+pexpr_no_paren:
+ LLITERAL
+ {
+ $$ = nodlit($1);
+ }
+| name
+| pexpr '.' sym
+ {
+ if $1.Op == OPACK {
+ var s *Sym
+ s = restrictlookup($3.Name, $1.Pkg);
+ $1.Used = 1;
+ $$ = oldname(s);
+ break;
+ }
+ $$ = Nod(OXDOT, $1, newname($3));
+ }
+| pexpr '.' '(' expr_or_type ')'
+ {
+ $$ = Nod(ODOTTYPE, $1, $4);
+ }
+| pexpr '.' '(' LTYPE ')'
+ {
+ $$ = Nod(OTYPESW, nil, $1);
+ }
+| pexpr '[' expr ']'
+ {
+ $$ = Nod(OINDEX, $1, $3);
+ }
+| pexpr '[' oexpr ':' oexpr ']'
+ {
+ $$ = Nod(OSLICE, $1, Nod(OKEY, $3, $5));
+ }
+| pexpr '[' oexpr ':' oexpr ':' oexpr ']'
+ {
+ if $5 == nil {
+ Yyerror("middle index required in 3-index slice");
+ }
+ if $7 == nil {
+ Yyerror("final index required in 3-index slice");
+ }
+ $$ = Nod(OSLICE3, $1, Nod(OKEY, $3, Nod(OKEY, $5, $7)));
+ }
+| pseudocall
+| convtype '(' expr ocomma ')'
+ {
+ // conversion
+ $$ = Nod(OCALL, $1, nil);
+ $$.List = list1($3);
+ }
+| comptype lbrace start_complit braced_keyval_list '}'
+ {
+ $$ = $3;
+ $$.Right = $1;
+ $$.List = $4;
+ fixlbrace($2);
+ }
+| pexpr_no_paren '{' start_complit braced_keyval_list '}'
+ {
+ $$ = $3;
+ $$.Right = $1;
+ $$.List = $4;
+ }
+| '(' expr_or_type ')' '{' start_complit braced_keyval_list '}'
+ {
+ Yyerror("cannot parenthesize type in composite literal");
+ $$ = $5;
+ $$.Right = $2;
+ $$.List = $6;
+ }
+| fnliteral
+
+start_complit:
+ {
+ // composite expression.
+ // make node early so we get the right line number.
+ $$ = Nod(OCOMPLIT, nil, nil);
+ }
+
+keyval:
+ expr ':' complitexpr
+ {
+ $$ = Nod(OKEY, $1, $3);
+ }
+
+bare_complitexpr:
+ expr
+ {
+ // These nodes do not carry line numbers.
+ // Since a composite literal commonly spans several lines,
+ // the line number on errors may be misleading.
+ // Introduce a wrapper node to give the correct line.
+ $$ = $1;
+ switch($$.Op) {
+ case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+ $$ = Nod(OPAREN, $$, nil);
+ $$.Implicit = 1;
+ }
+ }
+| '{' start_complit braced_keyval_list '}'
+ {
+ $$ = $2;
+ $$.List = $3;
+ }
+
+complitexpr:
+ expr
+| '{' start_complit braced_keyval_list '}'
+ {
+ $$ = $2;
+ $$.List = $3;
+ }
+
+pexpr:
+ pexpr_no_paren
+| '(' expr_or_type ')'
+ {
+ $$ = $2;
+
+ // Need to know on lhs of := whether there are ( ).
+ // Don't bother with the OPAREN in other cases:
+ // it's just a waste of memory and time.
+ switch($$.Op) {
+ case ONAME, ONONAME, OPACK, OTYPE, OLITERAL, OTYPESW:
+ $$ = Nod(OPAREN, $$, nil);
+ }
+ }
+
+expr_or_type:
+ expr
+| non_expr_type %prec PreferToRightParen
+
+name_or_type:
+ ntype
+
+lbrace:
+ LBODY
+ {
+ $$ = LBODY;
+ }
+| '{'
+ {
+ $$ = '{';
+ }
+
+/*
+ * names and types
+ * newname is used before declared
+ * oldname is used after declared
+ */
+new_name:
+ sym
+ {
+ if $1 == nil {
+ $$ = nil;
+ } else {
+ $$ = newname($1);
+ }
+ }
+
+dcl_name:
+ sym
+ {
+ $$ = dclname($1);
+ }
+
+onew_name:
+ {
+ $$ = nil;
+ }
+| new_name
+
+sym:
+ LNAME
+ {
+ $$ = $1;
+ // during imports, unqualified non-exported identifiers are from builtinpkg
+ if importpkg != nil && !exportname($1.Name) {
+ $$ = Pkglookup($1.Name, builtinpkg);
+ }
+ }
+| hidden_importsym
+| '?'
+ {
+ $$ = nil;
+ }
+
+hidden_importsym:
+ '@' LLITERAL '.' LNAME
+ {
+ var p *Pkg
+
+ if $2.U.Sval.S == "" {
+ p = importpkg;
+ } else {
+ if isbadimport($2.U.Sval) {
+ errorexit();
+ }
+ p = mkpkg($2.U.Sval);
+ }
+ $$ = Pkglookup($4.Name, p);
+ }
+| '@' LLITERAL '.' '?'
+ {
+ var p *Pkg
+
+ if $2.U.Sval.S == "" {
+ p = importpkg;
+ } else {
+ if isbadimport($2.U.Sval) {
+ errorexit();
+ }
+ p = mkpkg($2.U.Sval);
+ }
+ $$ = Pkglookup("?", p);
+ }
+
+name:
+ sym %prec NotParen
+ {
+ $$ = oldname($1);
+ if $$.Pack != nil {
+ $$.Pack.Used = 1;
+ }
+ }
+
+labelname:
+ new_name
+
+/*
+ * to avoid parsing conflicts, type is split into
+ * channel types
+ * function types
+ * parenthesized types
+ * any other type
+ * the type system makes additional restrictions,
+ * but those are not implemented in the grammar.
+ */
+dotdotdot:
+ LDDD
+ {
+ Yyerror("final argument in variadic function missing type");
+ $$ = Nod(ODDD, typenod(typ(TINTER)), nil);
+ }
+| LDDD ntype
+ {
+ $$ = Nod(ODDD, $2, nil);
+ }
+
+ntype:
+ recvchantype
+| fntype
+| othertype
+| ptrtype
+| dotname
+| '(' ntype ')'
+ {
+ $$ = $2;
+ }
+
+non_expr_type:
+ recvchantype
+| fntype
+| othertype
+| '*' non_expr_type
+ {
+ $$ = Nod(OIND, $2, nil);
+ }
+
+non_recvchantype:
+ fntype
+| othertype
+| ptrtype
+| dotname
+| '(' ntype ')'
+ {
+ $$ = $2;
+ }
+
+convtype:
+ fntype
+| othertype
+
+comptype:
+ othertype
+
+fnret_type:
+ recvchantype
+| fntype
+| othertype
+| ptrtype
+| dotname
+
+dotname:
+ name
+| name '.' sym
+ {
+ if $1.Op == OPACK {
+ var s *Sym
+ s = restrictlookup($3.Name, $1.Pkg);
+ $1.Used = 1;
+ $$ = oldname(s);
+ break;
+ }
+ $$ = Nod(OXDOT, $1, newname($3));
+ }
+
+othertype:
+ '[' oexpr ']' ntype
+ {
+ $$ = Nod(OTARRAY, $2, $4);
+ }
+| '[' LDDD ']' ntype
+ {
+ // array literal of nelem
+ $$ = Nod(OTARRAY, Nod(ODDD, nil, nil), $4);
+ }
+| LCHAN non_recvchantype
+ {
+ $$ = Nod(OTCHAN, $2, nil);
+ $$.Etype = Cboth;
+ }
+| LCHAN LCOMM ntype
+ {
+ $$ = Nod(OTCHAN, $3, nil);
+ $$.Etype = Csend;
+ }
+| LMAP '[' ntype ']' ntype
+ {
+ $$ = Nod(OTMAP, $3, $5);
+ }
+| structtype
+| interfacetype
+
+ptrtype:
+ '*' ntype
+ {
+ $$ = Nod(OIND, $2, nil);
+ }
+
+recvchantype:
+ LCOMM LCHAN ntype
+ {
+ $$ = Nod(OTCHAN, $3, nil);
+ $$.Etype = Crecv;
+ }
+
+structtype:
+ LSTRUCT lbrace structdcl_list osemi '}'
+ {
+ $$ = Nod(OTSTRUCT, nil, nil);
+ $$.List = $3;
+ fixlbrace($2);
+ }
+| LSTRUCT lbrace '}'
+ {
+ $$ = Nod(OTSTRUCT, nil, nil);
+ fixlbrace($2);
+ }
+
+interfacetype:
+ LINTERFACE lbrace interfacedcl_list osemi '}'
+ {
+ $$ = Nod(OTINTER, nil, nil);
+ $$.List = $3;
+ fixlbrace($2);
+ }
+| LINTERFACE lbrace '}'
+ {
+ $$ = Nod(OTINTER, nil, nil);
+ fixlbrace($2);
+ }
+
+/*
+ * function stuff
+ * all in one place to show how crappy it all is
+ */
+xfndcl:
+ LFUNC fndcl fnbody
+ {
+ $$ = $2;
+ if $$ == nil {
+ break;
+ }
+ if noescape && $3 != nil {
+ Yyerror("can only use //go:noescape with external func implementations");
+ }
+ $$.Nbody = $3;
+ $$.Endlineno = lineno;
+ $$.Noescape = noescape;
+ $$.Nosplit = nosplit;
+ $$.Nowritebarrier = nowritebarrier;
+ funcbody($$);
+ }
+
+fndcl:
+ sym '(' oarg_type_list_ocomma ')' fnres
+ {
+ var t *Node
+
+ $$ = nil;
+ $3 = checkarglist($3, 1);
+
+ if $1.Name == "init" {
+ $1 = renameinit();
+ if $3 != nil || $5 != nil {
+ Yyerror("func init must have no arguments and no return values");
+ }
+ }
+ if localpkg.Name == "main" && $1.Name == "main" {
+ if $3 != nil || $5 != nil {
+ Yyerror("func main must have no arguments and no return values");
+ }
+ }
+
+ t = Nod(OTFUNC, nil, nil);
+ t.List = $3;
+ t.Rlist = $5;
+
+ $$ = Nod(ODCLFUNC, nil, nil);
+ $$.Nname = newname($1);
+ $$.Nname.Defn = $$;
+ $$.Nname.Ntype = t; // TODO: check if nname already has an ntype
+ declare($$.Nname, PFUNC);
+
+ funchdr($$);
+ }
+| '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')' fnres
+ {
+ var rcvr, t *Node
+
+ $$ = nil;
+ $2 = checkarglist($2, 0);
+ $6 = checkarglist($6, 1);
+
+ if $2 == nil {
+ Yyerror("method has no receiver");
+ break;
+ }
+ if $2.Next != nil {
+ Yyerror("method has multiple receivers");
+ break;
+ }
+ rcvr = $2.N;
+ if rcvr.Op != ODCLFIELD {
+ Yyerror("bad receiver in method");
+ break;
+ }
+
+ t = Nod(OTFUNC, rcvr, nil);
+ t.List = $6;
+ t.Rlist = $8;
+
+ $$ = Nod(ODCLFUNC, nil, nil);
+ $$.Shortname = newname($4);
+ $$.Nname = methodname1($$.Shortname, rcvr.Right);
+ $$.Nname.Defn = $$;
+ $$.Nname.Ntype = t;
+ $$.Nname.Nointerface = nointerface;
+ declare($$.Nname, PFUNC);
+
+ funchdr($$);
+ }
+
+hidden_fndcl:
+ hidden_pkg_importsym '(' ohidden_funarg_list ')' ohidden_funres
+ {
+ var s *Sym
+ var t *Type
+
+ $$ = nil;
+
+ s = $1;
+ t = functype(nil, $3, $5);
+
+ importsym(s, ONAME);
+ if s.Def != nil && s.Def.Op == ONAME {
+ if Eqtype(t, s.Def.Type) {
+ dclcontext = PDISCARD; // since we skip funchdr below
+ break;
+ }
+ Yyerror("inconsistent definition for func %v during import\n\t%v\n\t%v", Sconv(s, 0), Tconv(s.Def.Type, 0), Tconv(t, 0));
+ }
+
+ $$ = newname(s);
+ $$.Type = t;
+ declare($$, PFUNC);
+
+ funchdr($$);
+ }
+| '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list ')' ohidden_funres
+ {
+ $$ = methodname1(newname($4), $2.N.Right);
+ $$.Type = functype($2.N, $6, $8);
+
+ checkwidth($$.Type);
+ addmethod($4, $$.Type, false, nointerface);
+ nointerface = false
+ funchdr($$);
+
+	// inl.c's inlnode, when run on a dotmeth node, expects to find the inlineable body as
+ // (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
+ // out by typecheck's lookdot as this $$.ttype. So by providing
+ // this back link here we avoid special casing there.
+ $$.Type.Nname = $$;
+ }
+
+fntype:
+ LFUNC '(' oarg_type_list_ocomma ')' fnres
+ {
+ $3 = checkarglist($3, 1);
+ $$ = Nod(OTFUNC, nil, nil);
+ $$.List = $3;
+ $$.Rlist = $5;
+ }
+
+fnbody:
+ {
+ $$ = nil;
+ }
+| '{' stmt_list '}'
+ {
+ $$ = $2;
+ if $$ == nil {
+ $$ = list1(Nod(OEMPTY, nil, nil));
+ }
+ }
+
+fnres:
+ %prec NotParen
+ {
+ $$ = nil;
+ }
+| fnret_type
+ {
+ $$ = list1(Nod(ODCLFIELD, nil, $1));
+ }
+| '(' oarg_type_list_ocomma ')'
+ {
+ $2 = checkarglist($2, 0);
+ $$ = $2;
+ }
+
+fnlitdcl:
+ fntype
+ {
+ closurehdr($1);
+ }
+
+fnliteral:
+ fnlitdcl lbrace stmt_list '}'
+ {
+ $$ = closurebody($3);
+ fixlbrace($2);
+ }
+| fnlitdcl error
+ {
+ $$ = closurebody(nil);
+ }
+
+/*
+ * lists of things
+ * note that they are left recursive
+ * to conserve yacc stack. they need to
+ * be reversed to interpret correctly
+ */
+xdcl_list:
+ {
+ $$ = nil;
+ }
+| xdcl_list xdcl ';'
+ {
+ $$ = concat($1, $2);
+ if nsyntaxerrors == 0 {
+ testdclstack();
+ }
+ nointerface = false
+ noescape = false
+ nosplit = false
+ nowritebarrier = false
+ }
+
+vardcl_list:
+ vardcl
+| vardcl_list ';' vardcl
+ {
+ $$ = concat($1, $3);
+ }
+
+constdcl_list:
+ constdcl1
+| constdcl_list ';' constdcl1
+ {
+ $$ = concat($1, $3);
+ }
+
+typedcl_list:
+ typedcl
+ {
+ $$ = list1($1);
+ }
+| typedcl_list ';' typedcl
+ {
+ $$ = list($1, $3);
+ }
+
+structdcl_list:
+ structdcl
+| structdcl_list ';' structdcl
+ {
+ $$ = concat($1, $3);
+ }
+
+interfacedcl_list:
+ interfacedcl
+ {
+ $$ = list1($1);
+ }
+| interfacedcl_list ';' interfacedcl
+ {
+ $$ = list($1, $3);
+ }
+
+structdcl:
+ new_name_list ntype oliteral
+ {
+ var l *NodeList
+
+ var n *Node
+ l = $1;
+ if l == nil {
+ // ? symbol, during import (list1(nil) == nil)
+ n = $2;
+ if n.Op == OIND {
+ n = n.Left;
+ }
+ n = embedded(n.Sym, importpkg);
+ n.Right = $2;
+ n.Val = $3;
+ $$ = list1(n);
+ break;
+ }
+
+ for l=$1; l != nil; l=l.Next {
+ l.N = Nod(ODCLFIELD, l.N, $2);
+ l.N.Val = $3;
+ }
+ }
+| embed oliteral
+ {
+ $1.Val = $2;
+ $$ = list1($1);
+ }
+| '(' embed ')' oliteral
+ {
+ $2.Val = $4;
+ $$ = list1($2);
+ Yyerror("cannot parenthesize embedded type");
+ }
+| '*' embed oliteral
+ {
+ $2.Right = Nod(OIND, $2.Right, nil);
+ $2.Val = $3;
+ $$ = list1($2);
+ }
+| '(' '*' embed ')' oliteral
+ {
+ $3.Right = Nod(OIND, $3.Right, nil);
+ $3.Val = $5;
+ $$ = list1($3);
+ Yyerror("cannot parenthesize embedded type");
+ }
+| '*' '(' embed ')' oliteral
+ {
+ $3.Right = Nod(OIND, $3.Right, nil);
+ $3.Val = $5;
+ $$ = list1($3);
+ Yyerror("cannot parenthesize embedded type");
+ }
+
+packname:
+ LNAME
+ {
+ var n *Node
+
+ $$ = $1;
+ n = oldname($1);
+ if n.Pack != nil {
+ n.Pack.Used = 1;
+ }
+ }
+| LNAME '.' sym
+ {
+ var pkg *Pkg
+
+ if $1.Def == nil || $1.Def.Op != OPACK {
+ Yyerror("%v is not a package", Sconv($1, 0));
+ pkg = localpkg;
+ } else {
+ $1.Def.Used = 1;
+ pkg = $1.Def.Pkg;
+ }
+ $$ = restrictlookup($3.Name, pkg);
+ }
+
+embed:
+ packname
+ {
+ $$ = embedded($1, localpkg);
+ }
+
+interfacedcl:
+ new_name indcl
+ {
+ $$ = Nod(ODCLFIELD, $1, $2);
+ ifacedcl($$);
+ }
+| packname
+ {
+ $$ = Nod(ODCLFIELD, nil, oldname($1));
+ }
+| '(' packname ')'
+ {
+ $$ = Nod(ODCLFIELD, nil, oldname($2));
+ Yyerror("cannot parenthesize embedded type");
+ }
+
+indcl:
+ '(' oarg_type_list_ocomma ')' fnres
+ {
+ // without func keyword
+ $2 = checkarglist($2, 1);
+ $$ = Nod(OTFUNC, fakethis(), nil);
+ $$.List = $2;
+ $$.Rlist = $4;
+ }
+
+/*
+ * function arguments.
+ */
+arg_type:
+ name_or_type
+| sym name_or_type
+ {
+ $$ = Nod(ONONAME, nil, nil);
+ $$.Sym = $1;
+ $$ = Nod(OKEY, $$, $2);
+ }
+| sym dotdotdot
+ {
+ $$ = Nod(ONONAME, nil, nil);
+ $$.Sym = $1;
+ $$ = Nod(OKEY, $$, $2);
+ }
+| dotdotdot
+
+arg_type_list:
+ arg_type
+ {
+ $$ = list1($1);
+ }
+| arg_type_list ',' arg_type
+ {
+ $$ = list($1, $3);
+ }
+
+oarg_type_list_ocomma:
+ {
+ $$ = nil;
+ }
+| arg_type_list ocomma
+ {
+ $$ = $1;
+ }
+
+/*
+ * statement
+ */
+stmt:
+ {
+ $$ = nil;
+ }
+| compound_stmt
+| common_dcl
+ {
+ $$ = liststmt($1);
+ }
+| non_dcl_stmt
+| error
+ {
+ $$ = nil;
+ }
+
+non_dcl_stmt:
+ simple_stmt
+| for_stmt
+| switch_stmt
+| select_stmt
+| if_stmt
+| labelname ':'
+ {
+ $1 = Nod(OLABEL, $1, nil);
+ $1.Sym = dclstack; // context, for goto restrictions
+ }
+ stmt
+ {
+ var l *NodeList
+
+ $1.Defn = $4;
+ l = list1($1);
+ if $4 != nil {
+ l = list(l, $4);
+ }
+ $$ = liststmt(l);
+ }
+| LFALL
+ {
+ // will be converted to OFALL
+ $$ = Nod(OXFALL, nil, nil);
+ $$.Xoffset = int64(block);
+ }
+| LBREAK onew_name
+ {
+ $$ = Nod(OBREAK, $2, nil);
+ }
+| LCONTINUE onew_name
+ {
+ $$ = Nod(OCONTINUE, $2, nil);
+ }
+| LGO pseudocall
+ {
+ $$ = Nod(OPROC, $2, nil);
+ }
+| LDEFER pseudocall
+ {
+ $$ = Nod(ODEFER, $2, nil);
+ }
+| LGOTO new_name
+ {
+ $$ = Nod(OGOTO, $2, nil);
+ $$.Sym = dclstack; // context, for goto restrictions
+ }
+| LRETURN oexpr_list
+ {
+ $$ = Nod(ORETURN, nil, nil);
+ $$.List = $2;
+ if $$.List == nil && Curfn != nil {
+ var l *NodeList
+
+ for l=Curfn.Dcl; l != nil; l=l.Next {
+ if l.N.Class == PPARAM {
+ continue;
+ }
+ if l.N.Class != PPARAMOUT {
+ break;
+ }
+ if l.N.Sym.Def != l.N {
+ Yyerror("%s is shadowed during return", l.N.Sym.Name);
+ }
+ }
+ }
+ }
+
+stmt_list:
+ stmt
+ {
+ $$ = nil;
+ if $1 != nil {
+ $$ = list1($1);
+ }
+ }
+| stmt_list ';' stmt
+ {
+ $$ = $1;
+ if $3 != nil {
+ $$ = list($$, $3);
+ }
+ }
+
+new_name_list:
+ new_name
+ {
+ $$ = list1($1);
+ }
+| new_name_list ',' new_name
+ {
+ $$ = list($1, $3);
+ }
+
+dcl_name_list:
+ dcl_name
+ {
+ $$ = list1($1);
+ }
+| dcl_name_list ',' dcl_name
+ {
+ $$ = list($1, $3);
+ }
+
+expr_list:
+ expr
+ {
+ $$ = list1($1);
+ }
+| expr_list ',' expr
+ {
+ $$ = list($1, $3);
+ }
+
+expr_or_type_list:
+ expr_or_type
+ {
+ $$ = list1($1);
+ }
+| expr_or_type_list ',' expr_or_type
+ {
+ $$ = list($1, $3);
+ }
+
+/*
+ * list of combo of keyval and val
+ */
+keyval_list:
+ keyval
+ {
+ $$ = list1($1);
+ }
+| bare_complitexpr
+ {
+ $$ = list1($1);
+ }
+| keyval_list ',' keyval
+ {
+ $$ = list($1, $3);
+ }
+| keyval_list ',' bare_complitexpr
+ {
+ $$ = list($1, $3);
+ }
+
+braced_keyval_list:
+ {
+ $$ = nil;
+ }
+| keyval_list ocomma
+ {
+ $$ = $1;
+ }
+
+/*
+ * optional things
+ */
+osemi:
+| ';'
+
+ocomma:
+| ','
+
+oexpr:
+ {
+ $$ = nil;
+ }
+| expr
+
+oexpr_list:
+ {
+ $$ = nil;
+ }
+| expr_list
+
+osimple_stmt:
+ {
+ $$ = nil;
+ }
+| simple_stmt
+
+ohidden_funarg_list:
+ {
+ $$ = nil;
+ }
+| hidden_funarg_list
+
+ohidden_structdcl_list:
+ {
+ $$ = nil;
+ }
+| hidden_structdcl_list
+
+ohidden_interfacedcl_list:
+ {
+ $$ = nil;
+ }
+| hidden_interfacedcl_list
+
+oliteral:
+ {
+ $$.Ctype = CTxxx;
+ }
+| LLITERAL
+
+/*
+ * import syntax from package header
+ */
+hidden_import:
+ LIMPORT LNAME LLITERAL ';'
+ {
+ importimport($2, $3.U.Sval);
+ }
+| LVAR hidden_pkg_importsym hidden_type ';'
+ {
+ importvar($2, $3);
+ }
+| LCONST hidden_pkg_importsym '=' hidden_constant ';'
+ {
+ importconst($2, Types[TIDEAL], $4);
+ }
+| LCONST hidden_pkg_importsym hidden_type '=' hidden_constant ';'
+ {
+ importconst($2, $3, $5);
+ }
+| LTYPE hidden_pkgtype hidden_type ';'
+ {
+ importtype($2, $3);
+ }
+| LFUNC hidden_fndcl fnbody ';'
+ {
+ if $2 == nil {
+ dclcontext = PEXTERN; // since we skip the funcbody below
+ break;
+ }
+
+ $2.Inl = $3;
+
+ funcbody($2);
+ importlist = list(importlist, $2);
+
+ if Debug['E'] > 0 {
+ print("import [%v] func %lN \n", Zconv(importpkg.Path, 0), $2);
+ if Debug['m'] > 2 && $2.Inl != nil {
+ print("inl body:%+H\n", $2.Inl);
+ }
+ }
+ }
+
+hidden_pkg_importsym:
+ hidden_importsym
+ {
+ $$ = $1;
+ structpkg = $$.Pkg;
+ }
+
+hidden_pkgtype:
+ hidden_pkg_importsym
+ {
+ $$ = pkgtype($1);
+ importsym($1, OTYPE);
+ }
+
+/*
+ * importing types
+ */
+
+hidden_type:
+ hidden_type_misc
+| hidden_type_recv_chan
+| hidden_type_func
+
+hidden_type_non_recv_chan:
+ hidden_type_misc
+| hidden_type_func
+
+hidden_type_misc:
+ hidden_importsym
+ {
+ $$ = pkgtype($1);
+ }
+| LNAME
+ {
+ // predefined name like uint8
+ $1 = Pkglookup($1.Name, builtinpkg);
+ if $1.Def == nil || $1.Def.Op != OTYPE {
+ Yyerror("%s is not a type", $1.Name);
+ $$ = nil;
+ } else {
+ $$ = $1.Def.Type;
+ }
+ }
+| '[' ']' hidden_type
+ {
+ $$ = aindex(nil, $3);
+ }
+| '[' LLITERAL ']' hidden_type
+ {
+ $$ = aindex(nodlit($2), $4);
+ }
+| LMAP '[' hidden_type ']' hidden_type
+ {
+ $$ = maptype($3, $5);
+ }
+| LSTRUCT '{' ohidden_structdcl_list '}'
+ {
+ $$ = tostruct($3);
+ }
+| LINTERFACE '{' ohidden_interfacedcl_list '}'
+ {
+ $$ = tointerface($3);
+ }
+| '*' hidden_type
+ {
+ $$ = Ptrto($2);
+ }
+| LCHAN hidden_type_non_recv_chan
+ {
+ $$ = typ(TCHAN);
+ $$.Type = $2;
+ $$.Chan = Cboth;
+ }
+| LCHAN '(' hidden_type_recv_chan ')'
+ {
+ $$ = typ(TCHAN);
+ $$.Type = $3;
+ $$.Chan = Cboth;
+ }
+| LCHAN LCOMM hidden_type
+ {
+ $$ = typ(TCHAN);
+ $$.Type = $3;
+ $$.Chan = Csend;
+ }
+
+hidden_type_recv_chan:
+ LCOMM LCHAN hidden_type
+ {
+ $$ = typ(TCHAN);
+ $$.Type = $3;
+ $$.Chan = Crecv;
+ }
+
+hidden_type_func:
+ LFUNC '(' ohidden_funarg_list ')' ohidden_funres
+ {
+ $$ = functype(nil, $3, $5);
+ }
+
+hidden_funarg:
+ sym hidden_type oliteral
+ {
+ $$ = Nod(ODCLFIELD, nil, typenod($2));
+ if $1 != nil {
+ $$.Left = newname($1);
+ }
+ $$.Val = $3;
+ }
+| sym LDDD hidden_type oliteral
+ {
+ var t *Type
+
+ t = typ(TARRAY);
+ t.Bound = -1;
+ t.Type = $3;
+
+ $$ = Nod(ODCLFIELD, nil, typenod(t));
+ if $1 != nil {
+ $$.Left = newname($1);
+ }
+ $$.Isddd = 1;
+ $$.Val = $4;
+ }
+
+hidden_structdcl:
+ sym hidden_type oliteral
+ {
+ var s *Sym
+ var p *Pkg
+
+ if $1 != nil && $1.Name != "?" {
+ $$ = Nod(ODCLFIELD, newname($1), typenod($2));
+ $$.Val = $3;
+ } else {
+ s = $2.Sym;
+ if s == nil && Isptr[$2.Etype] != 0 {
+ s = $2.Type.Sym;
+ }
+ p = importpkg;
+ if $1 != nil {
+ p = $1.Pkg;
+ }
+ $$ = embedded(s, p);
+ $$.Right = typenod($2);
+ $$.Val = $3;
+ }
+ }
+
+hidden_interfacedcl:
+ sym '(' ohidden_funarg_list ')' ohidden_funres
+ {
+ $$ = Nod(ODCLFIELD, newname($1), typenod(functype(fakethis(), $3, $5)));
+ }
+| hidden_type
+ {
+ $$ = Nod(ODCLFIELD, nil, typenod($1));
+ }
+
+ohidden_funres:
+ {
+ $$ = nil;
+ }
+| hidden_funres
+
+hidden_funres:
+ '(' ohidden_funarg_list ')'
+ {
+ $$ = $2;
+ }
+| hidden_type
+ {
+ $$ = list1(Nod(ODCLFIELD, nil, typenod($1)));
+ }
+
+/*
+ * importing constants
+ */
+
+hidden_literal:
+ LLITERAL
+ {
+ $$ = nodlit($1);
+ }
+| '-' LLITERAL
+ {
+ $$ = nodlit($2);
+ switch($$.Val.Ctype){
+ case CTINT, CTRUNE:
+ mpnegfix($$.Val.U.Xval);
+ break;
+ case CTFLT:
+ mpnegflt($$.Val.U.Fval);
+ break;
+ case CTCPLX:
+ mpnegflt(&$$.Val.U.Cval.Real);
+ mpnegflt(&$$.Val.U.Cval.Imag);
+ break;
+ default:
+ Yyerror("bad negated constant");
+ }
+ }
+| sym
+ {
+ $$ = oldname(Pkglookup($1.Name, builtinpkg));
+ if $$.Op != OLITERAL {
+ Yyerror("bad constant %v", Sconv($$.Sym, 0));
+ }
+ }
+
+hidden_constant:
+ hidden_literal
+| '(' hidden_literal '+' hidden_literal ')'
+ {
+ if $2.Val.Ctype == CTRUNE && $4.Val.Ctype == CTINT {
+ $$ = $2;
+ mpaddfixfix($2.Val.U.Xval, $4.Val.U.Xval, 0);
+ break;
+ }
+ $4.Val.U.Cval.Real = $4.Val.U.Cval.Imag;
+ Mpmovecflt(&$4.Val.U.Cval.Imag, 0.0);
+ $$ = nodcplxlit($2.Val, $4.Val);
+ }
+
+hidden_import_list:
+| hidden_import_list hidden_import
+
+hidden_funarg_list:
+ hidden_funarg
+ {
+ $$ = list1($1);
+ }
+| hidden_funarg_list ',' hidden_funarg
+ {
+ $$ = list($1, $3);
+ }
+
+hidden_structdcl_list:
+ hidden_structdcl
+ {
+ $$ = list1($1);
+ }
+| hidden_structdcl_list ';' hidden_structdcl
+ {
+ $$ = list($1, $3);
+ }
+
+hidden_interfacedcl_list:
+ hidden_interfacedcl
+ {
+ $$ = list1($1);
+ }
+| hidden_interfacedcl_list ';' hidden_interfacedcl
+ {
+ $$ = list($1, $3);
+ }
+
+%%
+
+func fixlbrace(lbr int) {
+ // If the opening brace was an LBODY,
+ // set up for another one now that we're done.
+ // See comment in lex.C about loophack.
+ if lbr == LBODY {
+ loophack = 1
+ }
+}
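+
+// For illustration (a sketch, not part of the grammar): the LBODY
+// distinction exists because a '{' after an if/for/switch header could
+// open either the statement body or a composite literal, as in
+//
+//	if x == (T{}) { ... }
+//
+// where the literal must be parenthesized so that the first unguarded
+// '{' lexes as LBODY and opens the body.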
+
diff --git a/src/cmd/internal/gc/gsubr.go b/src/cmd/internal/gc/gsubr.go
new file mode 100644
index 0000000000..6762171192
--- /dev/null
+++ b/src/cmd/internal/gc/gsubr.go
@@ -0,0 +1,617 @@
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import "cmd/internal/obj"
+
+var ddumped int
+
+var dfirst *obj.Prog
+
+var dpc *obj.Prog
+
+/*
+ * Is this node a memory operand?
+ */
+func Ismem(n *Node) int {
+ switch n.Op {
+ case OITAB,
+ OSPTR,
+ OLEN,
+ OCAP,
+ OINDREG,
+ ONAME,
+ OPARAM,
+ OCLOSUREVAR:
+ return 1
+
+ case OADDR:
+ return bool2int(Thearch.Thechar == '6' || Thearch.Thechar == '9') // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
+ }
+
+ return 0
+}
+
+func Samereg(a *Node, b *Node) int {
+ if a == nil || b == nil {
+ return 0
+ }
+ if a.Op != OREGISTER {
+ return 0
+ }
+ if b.Op != OREGISTER {
+ return 0
+ }
+ if a.Val.U.Reg != b.Val.U.Reg {
+ return 0
+ }
+ return 1
+}
+
+/*
+ * gsubr.c
+ */
+func Gbranch(as int, t *Type, likely int) *obj.Prog {
+ var p *obj.Prog
+
+ p = Prog(as)
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.U.Branch = nil
+ if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' {
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(bool2int(likely > 0))
+ }
+
+ return p
+}
+
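+// Prog adds a new instruction with opcode as to the current list.
+// Data pseudo-instructions (ADATA, AGLOBL) accumulate on their own
+// list, headed at dfirst, and are emitted later by dumpdata; all
+// other instructions are appended at Pc.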
+func Prog(as int) *obj.Prog {
+ var p *obj.Prog
+
+ if as == obj.ADATA || as == obj.AGLOBL {
+ if ddumped != 0 {
+ Fatal("already dumped data")
+ }
+ if dpc == nil {
+ dpc = Ctxt.NewProg()
+ dfirst = dpc
+ }
+
+ p = dpc
+ dpc = Ctxt.NewProg()
+ p.Link = dpc
+ } else {
+ p = Pc
+ Pc = Ctxt.NewProg()
+ Clearp(Pc)
+ p.Link = Pc
+ }
+
+ if lineno == 0 {
+ if Debug['K'] != 0 {
+ Warn("prog: line 0")
+ }
+ }
+
+ p.As = int16(as)
+ p.Lineno = lineno
+ return p
+}
+
+func Nodreg(n *Node, t *Type, r int) {
+ if t == nil {
+ Fatal("nodreg: t nil")
+ }
+
+ *n = Node{}
+ n.Op = OREGISTER
+ n.Addable = 1
+ ullmancalc(n)
+ n.Val.U.Reg = int16(r)
+ n.Type = t
+}
+
+func Nodindreg(n *Node, t *Type, r int) {
+ Nodreg(n, t, r)
+ n.Op = OINDREG
+}
+
+func Afunclit(a *obj.Addr, n *Node) {
+ if a.Type == obj.TYPE_ADDR && a.Name == obj.NAME_EXTERN {
+ a.Type = obj.TYPE_MEM
+ a.Sym = Linksym(n.Sym)
+ }
+}
+
+func Clearp(p *obj.Prog) {
+ obj.Nopout(p)
+ p.As = obj.AEND
+ p.Pc = int64(pcloc)
+ pcloc++
+}
+
+func dumpdata() {
+ ddumped = 1
+ if dfirst == nil {
+ return
+ }
+ newplist()
+ *Pc = *dfirst
+ Pc = dpc
+ Clearp(Pc)
+}
+
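+// fixautoused walks the instruction list at *p, deleting ATYPE
+// instructions that refer to unused autos, nop-ing out VARDEF and
+// VARKILL for unused variables, and adding each auto's Stkdelta to
+// its recorded stack offset.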
+func fixautoused(p *obj.Prog) {
+ var lp **obj.Prog
+
+ for lp = &p; ; {
+ p = *lp
+ if !(p != nil) {
+ break
+ }
+ if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && !(((p.From.Node).(*Node)).Used != 0) {
+ *lp = p.Link
+ continue
+ }
+
+ if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && !(((p.To.Node).(*Node)).Used != 0) {
+ // Cannot remove VARDEF instruction, because - unlike TYPE handled above -
+ // VARDEFs are interspersed with other code, and a jump might be using the
+ // VARDEF as a target. Replace with a no-op instead. A later pass will remove
+ // the no-ops.
+ obj.Nopout(p)
+
+ continue
+ }
+
+ if p.From.Name == obj.NAME_AUTO && p.From.Node != nil {
+ p.From.Offset += ((p.From.Node).(*Node)).Stkdelta
+ }
+
+ if p.To.Name == obj.NAME_AUTO && p.To.Node != nil {
+ p.To.Offset += ((p.To.Node).(*Node)).Stkdelta
+ }
+
+ lp = &p.Link
+ }
+}
+
+func ggloblnod(nam *Node) {
+ var p *obj.Prog
+
+ p = Thearch.Gins(obj.AGLOBL, nam, nil)
+ p.Lineno = nam.Lineno
+ p.From.Sym.Gotype = Linksym(ngotype(nam))
+ p.To.Sym = nil
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = nam.Type.Width
+ if nam.Readonly != 0 {
+ p.From3.Offset = obj.RODATA
+ }
+ if nam.Type != nil && !haspointers(nam.Type) {
+ p.From3.Offset |= obj.NOPTR
+ }
+}
+
+func ggloblsym(s *Sym, width int32, flags int8) {
+ var p *obj.Prog
+
+ p = Thearch.Gins(obj.AGLOBL, nil, nil)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = Linksym(s)
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = int64(width)
+ p.From3.Offset = int64(flags)
+}
+
+func gjmp(to *obj.Prog) *obj.Prog {
+ var p *obj.Prog
+
+ p = Gbranch(obj.AJMP, nil, 0)
+ if to != nil {
+ Patch(p, to)
+ }
+ return p
+}
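+
+// A forward jump whose target does not yet exist can be created with
+// gjmp(nil) and resolved later with Patch (a usage sketch; here the
+// eventual target is simply the current instruction):
+//
+//	p := gjmp(nil) // unpatched unconditional jump
+//	// ... emit the code being jumped over ...
+//	Patch(p, Pc) // jump lands at the current instruction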
+
+func gtrack(s *Sym) {
+ var p *obj.Prog
+
+ p = Thearch.Gins(obj.AUSEFIELD, nil, nil)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = Linksym(s)
+}
+
+func gused(n *Node) {
+ Thearch.Gins(obj.ANOP, n, nil) // used
+}
+
+func Isfat(t *Type) int {
+ if t != nil {
+ switch t.Etype {
+ case TSTRUCT,
+ TARRAY,
+ TSTRING,
+ TINTER: // maybe remove later
+ return 1
+ }
+ }
+
+ return 0
+}
+
+func markautoused(p *obj.Prog) {
+ for ; p != nil; p = p.Link {
+ if p.As == obj.ATYPE || p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+
+ if p.From.Node != nil {
+ ((p.From.Node).(*Node)).Used = 1
+ }
+
+ if p.To.Node != nil {
+ ((p.To.Node).(*Node)).Used = 1
+ }
+ }
+}
+
+func Naddr(n *Node, a *obj.Addr, canemitcode int) {
+ var s *Sym
+
+ *a = obj.Zprog.From
+ if n == nil {
+ return
+ }
+
+ if n.Type != nil && n.Type.Etype != TIDEAL {
+ // TODO(rsc): This is undone by the selective clearing of width below,
+ // to match architectures that were not as aggressive in setting width
+ // during naddr. Those widths must be cleared to avoid triggering
+ // failures in gins when it detects real but heretofore latent (and one
+ // hopes innocuous) type mismatches.
+ // The type mismatches should be fixed and the clearing below removed.
+ dowidth(n.Type)
+
+ a.Width = n.Type.Width
+ }
+
+ switch n.Op {
+ default:
+ Fatal("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
+
+ case OREGISTER:
+ a.Type = obj.TYPE_REG
+ a.Reg = n.Val.U.Reg
+ a.Sym = nil
+ if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
+ a.Width = 0
+ }
+
+ case OINDREG:
+ a.Type = obj.TYPE_MEM
+ a.Reg = n.Val.U.Reg
+ a.Sym = Linksym(n.Sym)
+ a.Offset = n.Xoffset
+ if a.Offset != int64(int32(a.Offset)) {
+ Yyerror("offset %d too large for OINDREG", a.Offset)
+ }
+ if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
+ a.Width = 0
+ }
+
+ // n->left is PHEAP ONAME for stack parameter.
+ // compute address of actual parameter on stack.
+ case OPARAM:
+ a.Etype = Simtype[n.Left.Type.Etype]
+
+ a.Width = n.Left.Type.Width
+ a.Offset = n.Xoffset
+ a.Sym = Linksym(n.Left.Sym)
+ a.Type = obj.TYPE_MEM
+ a.Name = obj.NAME_PARAM
+ a.Node = n.Left.Orig
+
+ case OCLOSUREVAR:
+ if !(Curfn.Needctxt != 0) {
+ Fatal("closurevar without needctxt")
+ }
+ a.Type = obj.TYPE_MEM
+ a.Reg = int16(Thearch.REGCTXT)
+ a.Sym = nil
+ a.Offset = n.Xoffset
+
+ case OCFUNC:
+ Naddr(n.Left, a, canemitcode)
+ a.Sym = Linksym(n.Left.Sym)
+
+ case ONAME:
+ a.Etype = 0
+ if n.Type != nil {
+ a.Etype = Simtype[n.Type.Etype]
+ }
+ a.Offset = n.Xoffset
+ s = n.Sym
+ a.Node = n.Orig
+
+ //if(a->node >= (Node*)&n)
+ // fatal("stack node");
+ if s == nil {
+ s = Lookup(".noname")
+ }
+ if n.Method != 0 {
+ if n.Type != nil {
+ if n.Type.Sym != nil {
+ if n.Type.Sym.Pkg != nil {
+ s = Pkglookup(s.Name, n.Type.Sym.Pkg)
+ }
+ }
+ }
+ }
+
+ a.Type = obj.TYPE_MEM
+ switch n.Class {
+ default:
+ Fatal("naddr: ONAME class %v %d\n", Sconv(n.Sym, 0), n.Class)
+ fallthrough
+
+ case PEXTERN:
+ a.Name = obj.NAME_EXTERN
+
+ case PAUTO:
+ a.Name = obj.NAME_AUTO
+
+ case PPARAM,
+ PPARAMOUT:
+ a.Name = obj.NAME_PARAM
+
+ case PFUNC:
+ a.Name = obj.NAME_EXTERN
+ a.Type = obj.TYPE_ADDR
+ a.Width = int64(Widthptr)
+ s = funcsym(s)
+ }
+
+ a.Sym = Linksym(s)
+
+ case OLITERAL:
+ if Thearch.Thechar == '8' {
+ a.Width = 0
+ }
+ switch n.Val.Ctype {
+ default:
+ Fatal("naddr: const %v", Tconv(n.Type, obj.FmtLong))
+
+ case CTFLT:
+ a.Type = obj.TYPE_FCONST
+ a.U.Dval = mpgetflt(n.Val.U.Fval)
+
+ case CTINT,
+ CTRUNE:
+ a.Sym = nil
+ a.Type = obj.TYPE_CONST
+ a.Offset = Mpgetfix(n.Val.U.Xval)
+
+ case CTSTR:
+ datagostring(n.Val.U.Sval, a)
+
+ case CTBOOL:
+ a.Sym = nil
+ a.Type = obj.TYPE_CONST
+ a.Offset = int64(n.Val.U.Bval)
+
+ case CTNIL:
+ a.Sym = nil
+ a.Type = obj.TYPE_CONST
+ a.Offset = 0
+ }
+
+ case OADDR:
+ Naddr(n.Left, a, canemitcode)
+ a.Etype = uint8(Tptr)
+ if Thearch.Thechar != '5' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
+ a.Width = int64(Widthptr)
+ }
+ if a.Type != obj.TYPE_MEM {
+ Fatal("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
+ }
+ a.Type = obj.TYPE_ADDR
+
+ // itable of interface value
+ case OITAB:
+ Naddr(n.Left, a, canemitcode)
+
+ if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+ break // itab(nil)
+ }
+ a.Etype = uint8(Tptr)
+ a.Width = int64(Widthptr)
+
+ // pointer in a string or slice
+ case OSPTR:
+ Naddr(n.Left, a, canemitcode)
+
+ if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+ break // ptr(nil)
+ }
+ a.Etype = Simtype[Tptr]
+ a.Offset += int64(Array_array)
+ a.Width = int64(Widthptr)
+
+ // len of string or slice
+ case OLEN:
+ Naddr(n.Left, a, canemitcode)
+
+ if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+ break // len(nil)
+ }
+ a.Etype = Simtype[TUINT]
+ if Thearch.Thechar == '9' {
+ a.Etype = Simtype[TINT]
+ }
+ a.Offset += int64(Array_nel)
+ if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
+ a.Width = int64(Widthint)
+ }
+
+ // cap of string or slice
+ case OCAP:
+ Naddr(n.Left, a, canemitcode)
+
+ if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+ break // cap(nil)
+ }
+ a.Etype = Simtype[TUINT]
+ if Thearch.Thechar == '9' {
+ a.Etype = Simtype[TINT]
+ }
+ a.Offset += int64(Array_cap)
+ if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
+ a.Width = int64(Widthint)
+ }
+ }
+}
+
+func newplist() *obj.Plist {
+ var pl *obj.Plist
+
+ pl = obj.Linknewplist(Ctxt)
+
+ Pc = Ctxt.NewProg()
+ Clearp(Pc)
+ pl.Firstpc = Pc
+
+ return pl
+}
+
+func nodarg(t *Type, fp int) *Node {
+ var n *Node
+ var l *NodeList
+ var first *Type
+ var savet Iter
+
+ // entire argument struct, not just one arg
+ if t.Etype == TSTRUCT && t.Funarg != 0 {
+ n = Nod(ONAME, nil, nil)
+ n.Sym = Lookup(".args")
+ n.Type = t
+ first = Structfirst(&savet, &t)
+ if first == nil {
+ Fatal("nodarg: bad struct")
+ }
+ if first.Width == BADWIDTH {
+ Fatal("nodarg: offset not computed for %v", Tconv(t, 0))
+ }
+ n.Xoffset = first.Width
+ n.Addable = 1
+ goto fp
+ }
+
+ if t.Etype != TFIELD {
+ Fatal("nodarg: not field %v", Tconv(t, 0))
+ }
+
+ if fp == 1 {
+ for l = Curfn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
+ return n
+ }
+ }
+ }
+
+ n = Nod(ONAME, nil, nil)
+ n.Type = t.Type
+ n.Sym = t.Sym
+
+ if t.Width == BADWIDTH {
+ Fatal("nodarg: offset not computed for %v", Tconv(t, 0))
+ }
+ n.Xoffset = t.Width
+ n.Addable = 1
+ n.Orig = t.Nname
+
+ // Rewrite argument named _ to __,
+ // or else the assignment to _ will be
+ // discarded during code generation.
+fp:
+ if isblank(n) {
+ n.Sym = Lookup("__")
+ }
+
+ switch fp {
+ case 0: // output arg
+ n.Op = OINDREG
+
+ n.Val.U.Reg = int16(Thearch.REGSP)
+ if Thearch.Thechar == '5' {
+ n.Xoffset += 4
+ }
+ if Thearch.Thechar == '9' {
+ n.Xoffset += 8
+ }
+
+ case 1: // input arg
+ n.Class = PPARAM
+
+ case 2: // offset output arg
+ Fatal("shouldn't be used")
+
+ n.Op = OINDREG
+ n.Val.U.Reg = int16(Thearch.REGSP)
+ n.Xoffset += Types[Tptr].Width
+ }
+
+ n.Typecheck = 1
+ return n
+}
+
+func Patch(p *obj.Prog, to *obj.Prog) {
+ if p.To.Type != obj.TYPE_BRANCH {
+ Fatal("patch: not a branch")
+ }
+ p.To.U.Branch = to
+ p.To.Offset = to.Pc
+}
+
+func unpatch(p *obj.Prog) *obj.Prog {
+ var q *obj.Prog
+
+ if p.To.Type != obj.TYPE_BRANCH {
+ Fatal("unpatch: not a branch")
+ }
+ q = p.To.U.Branch
+ p.To.U.Branch = nil
+ p.To.Offset = 0
+ return q
+}
diff --git a/src/cmd/internal/gc/init.go b/src/cmd/internal/gc/init.go
new file mode 100644
index 0000000000..e738dba42f
--- /dev/null
+++ b/src/cmd/internal/gc/init.go
@@ -0,0 +1,232 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "fmt"
+
+// case OADD:
+// if(n->right->op == OLITERAL) {
+// v = n->right->vconst;
+// naddr(n->left, a, canemitcode);
+// } else
+// if(n->left->op == OLITERAL) {
+// v = n->left->vconst;
+// naddr(n->right, a, canemitcode);
+// } else
+// goto bad;
+// a->offset += v;
+// break;
+
+/*
+ * a function named init is a special case.
+ * it is called by the initialization before
+ * main is run. to make it unique within a
+ * package and also uncallable, the name,
+ * normally "pkg.init", is altered to "pkg.init·1".
+ */
+
+var renameinit_initgen int
+
+func renameinit() *Sym {
+ renameinit_initgen++
+ namebuf = fmt.Sprintf("init·%d", renameinit_initgen)
+ return Lookup(namebuf)
+}
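+
+// For example (illustrative): in a package that declares two init
+// functions, successive calls yield distinct, uncallable symbols:
+//
+//	renameinit() // init·1
+//	renameinit() // init·2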
+
+/*
+ * hand-craft the following initialization code
+ * var initdone· uint8 (1)
+ * func init() (2)
+ * if initdone· != 0 { (3)
+ * if initdone· == 2 (4)
+ * return
+ * throw(); (5)
+ * }
+ * initdone· = 1; (6)
+ * // over all matching imported symbols
+ * <pkg>.init() (7)
+ * { <init stmts> } (8)
+ * init·<n>() // if any (9)
+ * initdone· = 2; (10)
+ * return (11)
+ * }
+ */
+func anyinit(n *NodeList) int {
+ var h uint32
+ var s *Sym
+ var l *NodeList
+
+ // are there any interesting init statements
+ for l = n; l != nil; l = l.Next {
+ switch l.N.Op {
+ case ODCLFUNC,
+ ODCLCONST,
+ ODCLTYPE,
+ OEMPTY:
+ break
+
+ case OAS:
+ if isblank(l.N.Left) && candiscard(l.N.Right) != 0 {
+ break
+ }
+ fallthrough
+
+ default:
+ return 1
+ }
+ }
+
+ // is this main
+ if localpkg.Name == "main" {
+ return 1
+ }
+
+ // is there an explicit init function
+	namebuf = "init·1"
+
+ s = Lookup(namebuf)
+ if s.Def != nil {
+ return 1
+ }
+
+ // are there any imported init functions
+ for h = 0; h < NHASH; h++ {
+ for s = hash[h]; s != nil; s = s.Link {
+ if s.Name[0] != 'i' || s.Name != "init" {
+ continue
+ }
+ if s.Def == nil {
+ continue
+ }
+ return 1
+ }
+ }
+
+ // then none
+ return 0
+}
+
+func fninit(n *NodeList) {
+ var i int
+ var gatevar *Node
+ var a *Node
+ var b *Node
+ var fn *Node
+ var r *NodeList
+ var h uint32
+ var s *Sym
+ var initsym *Sym
+
+ if Debug['A'] != 0 {
+ // sys.go or unsafe.go during compiler build
+ return
+ }
+
+ n = initfix(n)
+ if !(anyinit(n) != 0) {
+ return
+ }
+
+ r = nil
+
+ // (1)
+	namebuf = "initdone·"
+
+ gatevar = newname(Lookup(namebuf))
+ addvar(gatevar, Types[TUINT8], PEXTERN)
+
+ // (2)
+ Maxarg = 0
+
+	namebuf = "init"
+
+ fn = Nod(ODCLFUNC, nil, nil)
+ initsym = Lookup(namebuf)
+ fn.Nname = newname(initsym)
+ fn.Nname.Defn = fn
+ fn.Nname.Ntype = Nod(OTFUNC, nil, nil)
+ declare(fn.Nname, PFUNC)
+ funchdr(fn)
+
+ // (3)
+ a = Nod(OIF, nil, nil)
+
+ a.Ntest = Nod(ONE, gatevar, Nodintconst(0))
+ r = list(r, a)
+
+ // (4)
+ b = Nod(OIF, nil, nil)
+
+ b.Ntest = Nod(OEQ, gatevar, Nodintconst(2))
+ b.Nbody = list1(Nod(ORETURN, nil, nil))
+ a.Nbody = list1(b)
+
+ // (5)
+ b = syslook("throwinit", 0)
+
+ b = Nod(OCALL, b, nil)
+ a.Nbody = list(a.Nbody, b)
+
+ // (6)
+ a = Nod(OAS, gatevar, Nodintconst(1))
+
+ r = list(r, a)
+
+ // (7)
+ for h = 0; h < NHASH; h++ {
+ for s = hash[h]; s != nil; s = s.Link {
+ if s.Name[0] != 'i' || s.Name != "init" {
+ continue
+ }
+ if s.Def == nil {
+ continue
+ }
+ if s == initsym {
+ continue
+ }
+
+ // could check that it is fn of no args/returns
+ a = Nod(OCALL, s.Def, nil)
+
+ r = list(r, a)
+ }
+ }
+
+ // (8)
+ r = concat(r, n)
+
+ // (9)
+ // could check that it is fn of no args/returns
+ for i = 1; ; i++ {
+ namebuf = fmt.Sprintf("init·%d", i)
+ s = Lookup(namebuf)
+ if s.Def == nil {
+ break
+ }
+ a = Nod(OCALL, s.Def, nil)
+ r = list(r, a)
+ }
+
+ // (10)
+ a = Nod(OAS, gatevar, Nodintconst(2))
+
+ r = list(r, a)
+
+ // (11)
+ a = Nod(ORETURN, nil, nil)
+
+ r = list(r, a)
+ exportsym(fn.Nname)
+
+ fn.Nbody = r
+ funcbody(fn)
+
+ Curfn = fn
+ typecheck(&fn, Etop)
+ typechecklist(r, Etop)
+ Curfn = nil
+ funccompile(fn)
+}
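+
+// As an illustration only (the imported package q is made up), for a
+// package that imports q and declares one init function (renamed to
+// init·1), the synthesized init is roughly:
+//
+//	var initdone· uint8
+//
+//	func init() {
+//		if initdone· != 0 {
+//			if initdone· == 2 {
+//				return
+//			}
+//			throwinit()
+//		}
+//		initdone· = 1
+//		q.init()
+//		// package-level initialization statements
+//		init·1()
+//		initdone· = 2
+//		return
+//	}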
diff --git a/src/cmd/internal/gc/inl.go b/src/cmd/internal/gc/inl.go
new file mode 100644
index 0000000000..1b4b40d8e0
--- /dev/null
+++ b/src/cmd/internal/gc/inl.go
@@ -0,0 +1,1040 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The inlining facility makes 2 passes: first caninl determines which
+// functions are suitable for inlining, and for those that are it
+// saves a copy of the body. Then inlcalls walks each function body to
+// expand calls to inlinable functions.
+//
+// The debug['l'] flag controls the aggressiveness. Note that main() swaps level 0 and 1,
+// making 1 the default and -l disable it. -ll and more are useful to flush out bugs.
+// These additional levels (beyond -l) may be buggy and are not supported.
+// 0: disabled
+// 1: 40-node leaf functions, oneliners, lazy typechecking (default)
+// 2: early typechecking of all imported bodies
+// 3: allow variadic functions
+// 4: allow non-leaf functions (breaks runtime.Caller)
+// 5: transitive inlining
+//
+// At some point this may get another default and become switch-offable with -N.
+//
+// The debug['m'] flag enables diagnostic output. A single -m is useful for verifying
+// which calls get inlined or not; more -m's are for debugging, and may go away at any point.
+//
+// TODO:
+// - inline functions with ... args
+// - handle T.meth(f()) with func f() (t T, arg, arg, )
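+//
+// For example (file name and line numbers illustrative), the -m output
+// produced by the Printfs below looks like:
+//
+//	f.go:3: can inline add
+//	f.go:7: inlining call to add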
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+// Used by caninl.
+
+// Used by inlcalls
+
+// Used during inlsubst[list]
+var inlfn *Node // function currently being inlined
+
+var inlretlabel *Node // target of the goto substituted in place of a return
+
+var inlretvars *NodeList // temp out variables
+
+// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
+// the ->sym can be re-used in the local package, so peel it off the receiver's type.
+func fnpkg(fn *Node) *Pkg {
+ var rcvr *Type
+
+ if fn.Type.Thistuple != 0 {
+ // method
+ rcvr = getthisx(fn.Type).Type.Type
+
+ if Isptr[rcvr.Etype] != 0 {
+ rcvr = rcvr.Type
+ }
+ if !(rcvr.Sym != nil) {
+ Fatal("receiver with no sym: [%v] %v (%v)", Sconv(fn.Sym, 0), Nconv(fn, obj.FmtLong), Tconv(rcvr, 0))
+ }
+ return rcvr.Sym.Pkg
+ }
+
+ // non-method
+ return fn.Sym.Pkg
+}
+
+// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
+// because they're a copy of an already checked body.
+func typecheckinl(fn *Node) {
+ var savefn *Node
+ var pkg *Pkg
+ var save_safemode int
+ var lno int
+
+ lno = int(setlineno(fn))
+
+ // typecheckinl is only for imported functions;
+ // their bodies may refer to unsafe as long as the package
+ // was marked safe during import (which was checked then).
+ // the ->inl of a local function has been typechecked before caninl copied it.
+ pkg = fnpkg(fn)
+
+ if pkg == localpkg || pkg == nil {
+ return // typecheckinl on local function
+ }
+
+ if Debug['m'] > 2 {
+ fmt.Printf("typecheck import [%v] %v { %v }\n", Sconv(fn.Sym, 0), Nconv(fn, obj.FmtLong), Hconv(fn.Inl, obj.FmtSharp))
+ }
+
+ save_safemode = safemode
+ safemode = 0
+
+ savefn = Curfn
+ Curfn = fn
+ typechecklist(fn.Inl, Etop)
+ Curfn = savefn
+
+ safemode = save_safemode
+
+ lineno = int32(lno)
+}
+
+// Caninl determines whether fn is inlineable.
+// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
+// fn and ->nbody will already have been typechecked.
+func caninl(fn *Node) {
+ var savefn *Node
+ var t *Type
+ var budget int
+
+ if fn.Op != ODCLFUNC {
+ Fatal("caninl %v", Nconv(fn, 0))
+ }
+ if !(fn.Nname != nil) {
+ Fatal("caninl no nname %v", Nconv(fn, obj.FmtSign))
+ }
+
+ // If fn has no body (is defined outside of Go), cannot inline it.
+ if fn.Nbody == nil {
+ return
+ }
+
+ if fn.Typecheck == 0 {
+ Fatal("caninl on non-typechecked function %v", Nconv(fn, 0))
+ }
+
+ // can't handle ... args yet
+ if Debug['l'] < 3 {
+ for t = fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+ if t.Isddd != 0 {
+ return
+ }
+ }
+ }
+
+	budget = 40 // allowed hairiness
+ if ishairylist(fn.Nbody, &budget) != 0 {
+ return
+ }
+
+ savefn = Curfn
+ Curfn = fn
+
+ fn.Nname.Inl = fn.Nbody
+ fn.Nbody = inlcopylist(fn.Nname.Inl)
+ fn.Nname.Inldcl = inlcopylist(fn.Nname.Defn.Dcl)
+
+ // hack, TODO, check for better way to link method nodes back to the thing with the ->inl
+ // this is so export can find the body of a method
+ fn.Type.Nname = fn.Nname
+
+ if Debug['m'] > 1 {
+ fmt.Printf("%v: can inline %v as: %v { %v }\n", fn.Line(), Nconv(fn.Nname, obj.FmtSharp), Tconv(fn.Type, obj.FmtSharp), Hconv(fn.Nname.Inl, obj.FmtSharp))
+ } else if Debug['m'] != 0 {
+ fmt.Printf("%v: can inline %v\n", fn.Line(), Nconv(fn.Nname, 0))
+ }
+
+ Curfn = savefn
+}
+
+// Look for anything we want to punt on.
+func ishairylist(ll *NodeList, budget *int) int {
+ for ; ll != nil; ll = ll.Next {
+ if ishairy(ll.N, budget) != 0 {
+ return 1
+ }
+ }
+ return 0
+}
+
+func ishairy(n *Node, budget *int) int {
+ if !(n != nil) {
+ return 0
+ }
+
+ // Things that are too hairy, irrespective of the budget
+ switch n.Op {
+ case OCALL,
+ OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH,
+ OPANIC,
+ ORECOVER:
+ if Debug['l'] < 4 {
+ return 1
+ }
+
+ case OCLOSURE,
+ OCALLPART,
+ ORANGE,
+ OFOR,
+ OSELECT,
+ OSWITCH,
+ OPROC,
+ ODEFER,
+ ODCLTYPE, // can't print yet
+ ODCLCONST, // can't print yet
+ ORETJMP:
+ return 1
+ }
+
+ (*budget)--
+
+ return bool2int(*budget < 0 || ishairy(n.Left, budget) != 0 || ishairy(n.Right, budget) != 0 || ishairylist(n.List, budget) != 0 || ishairylist(n.Rlist, budget) != 0 || ishairylist(n.Ninit, budget) != 0 || ishairy(n.Ntest, budget) != 0 || ishairy(n.Nincr, budget) != 0 || ishairylist(n.Nbody, budget) != 0 || ishairylist(n.Nelse, budget) != 0)
+}
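+
+// For illustration (a sketch): a tiny leaf function such as
+//
+//	func add(a, b int) int { return a + b }
+//
+// contains none of the ops above and stays well under the 40-node
+// budget, so caninl will save its body for inlining.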
+
+// Inlcopy and inlcopylist recursively copy the body of a function.
+// Any name-like node of non-local class is marked for re-export by adding it to
+// the exportlist.
+func inlcopylist(ll *NodeList) *NodeList {
+ var l *NodeList
+
+ l = nil
+ for ; ll != nil; ll = ll.Next {
+ l = list(l, inlcopy(ll.N))
+ }
+ return l
+}
+
+func inlcopy(n *Node) *Node {
+ var m *Node
+
+ if n == nil {
+ return nil
+ }
+
+ switch n.Op {
+ case ONAME,
+ OTYPE,
+ OLITERAL:
+ return n
+ }
+
+ m = Nod(OXXX, nil, nil)
+ *m = *n
+ m.Inl = nil
+ m.Left = inlcopy(n.Left)
+ m.Right = inlcopy(n.Right)
+ m.List = inlcopylist(n.List)
+ m.Rlist = inlcopylist(n.Rlist)
+ m.Ninit = inlcopylist(n.Ninit)
+ m.Ntest = inlcopy(n.Ntest)
+ m.Nincr = inlcopy(n.Nincr)
+ m.Nbody = inlcopylist(n.Nbody)
+ m.Nelse = inlcopylist(n.Nelse)
+
+ return m
+}
+
+// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
+// calls made to inlineable functions. This is the external entry point.
+func inlcalls(fn *Node) {
+ var savefn *Node
+
+ savefn = Curfn
+ Curfn = fn
+ inlnode(&fn)
+ if fn != Curfn {
+ Fatal("inlnode replaced curfn")
+ }
+ Curfn = savefn
+}
+
+// Turn an OINLCALL into a statement.
+func inlconv2stmt(n *Node) {
+ n.Op = OBLOCK
+
+ // n->ninit stays
+ n.List = n.Nbody
+
+ n.Nbody = nil
+ n.Rlist = nil
+}
+
+// Turn an OINLCALL into a single valued expression.
+func inlconv2expr(np **Node) {
+ var n *Node
+ var r *Node
+ n = *np
+ r = n.Rlist.N
+ addinit(&r, concat(n.Ninit, n.Nbody))
+ *np = r
+}
+
+// Turn the rlist (with the return values) of the OINLCALL in
+// n into an expression list lumping the ninit and body
+// containing the inlined statements on the first list element so
+// order will be preserved. Used in return, oas2func and call
+// statements.
+func inlconv2list(n *Node) *NodeList {
+ var l *NodeList
+
+ if n.Op != OINLCALL || n.Rlist == nil {
+ Fatal("inlconv2list %v\n", Nconv(n, obj.FmtSign))
+ }
+
+ l = n.Rlist
+ addinit(&l.N, concat(n.Ninit, n.Nbody))
+ return l
+}
+
+func inlnodelist(l *NodeList) {
+ for ; l != nil; l = l.Next {
+ inlnode(&l.N)
+ }
+}
+
+// inlnode recurses over the tree to find inlineable calls, which will
+// be turned into OINLCALLs by mkinlcall. When the recursion comes
+// back up, it will examine left, right, list, rlist, ninit, ntest, nincr,
+// nbody and nelse and use one of the 4 inlconv/glue functions above
+// to turn the OINLCALL into an expression, a statement, or patch it
+// into this node's list or rlist as appropriate.
+// NOTE it makes no sense to pass the glue functions down the
+// recursion to the level where the OINLCALL gets created because they
+// have to edit /this/ n, so you'd have to push that one down as well,
+// but then you may as well do it here. So this is cleaner and
+// shorter and less complicated.
+func inlnode(np **Node) {
+ var n *Node
+ var l *NodeList
+ var lno int
+
+ if *np == nil {
+ return
+ }
+
+ n = *np
+
+ switch n.Op {
+ // inhibit inlining of their argument
+ case ODEFER,
+ OPROC:
+ switch n.Left.Op {
+ case OCALLFUNC,
+ OCALLMETH:
+ n.Left.Etype = n.Op
+ }
+ fallthrough
+
+ // TODO do them here (or earlier),
+ // so escape analysis can avoid more heapmoves.
+ case OCLOSURE:
+ return
+ }
+
+ lno = int(setlineno(n))
+
+ inlnodelist(n.Ninit)
+ for l = n.Ninit; l != nil; l = l.Next {
+ if l.N.Op == OINLCALL {
+ inlconv2stmt(l.N)
+ }
+ }
+
+ inlnode(&n.Left)
+ if n.Left != nil && n.Left.Op == OINLCALL {
+ inlconv2expr(&n.Left)
+ }
+
+ inlnode(&n.Right)
+ if n.Right != nil && n.Right.Op == OINLCALL {
+ inlconv2expr(&n.Right)
+ }
+
+ inlnodelist(n.List)
+ switch n.Op {
+ case OBLOCK:
+ for l = n.List; l != nil; l = l.Next {
+ if l.N.Op == OINLCALL {
+ inlconv2stmt(l.N)
+ }
+ }
+
+ // if we just replaced arg in f(arg()) or return arg with an inlined call
+ // and arg returns multiple values, glue as list
+ case ORETURN,
+ OCALLFUNC,
+ OCALLMETH,
+ OCALLINTER,
+ OAPPEND,
+ OCOMPLEX:
+ if count(n.List) == 1 && n.List.N.Op == OINLCALL && count(n.List.N.Rlist) > 1 {
+ n.List = inlconv2list(n.List.N)
+ break
+ }
+ fallthrough
+
+ default:
+ for l = n.List; l != nil; l = l.Next {
+ if l.N.Op == OINLCALL {
+ inlconv2expr(&l.N)
+ }
+ }
+ }
+
+ inlnodelist(n.Rlist)
+ switch n.Op {
+ case OAS2FUNC:
+ if n.Rlist.N.Op == OINLCALL {
+ n.Rlist = inlconv2list(n.Rlist.N)
+ n.Op = OAS2
+ n.Typecheck = 0
+ typecheck(np, Etop)
+ break
+ }
+ fallthrough
+
+ default:
+ for l = n.Rlist; l != nil; l = l.Next {
+ if l.N.Op == OINLCALL {
+ inlconv2expr(&l.N)
+ }
+ }
+ }
+
+ inlnode(&n.Ntest)
+ if n.Ntest != nil && n.Ntest.Op == OINLCALL {
+ inlconv2expr(&n.Ntest)
+ }
+
+ inlnode(&n.Nincr)
+ if n.Nincr != nil && n.Nincr.Op == OINLCALL {
+ inlconv2stmt(n.Nincr)
+ }
+
+ inlnodelist(n.Nbody)
+ for l = n.Nbody; l != nil; l = l.Next {
+ if l.N.Op == OINLCALL {
+ inlconv2stmt(l.N)
+ }
+ }
+
+ inlnodelist(n.Nelse)
+ for l = n.Nelse; l != nil; l = l.Next {
+ if l.N.Op == OINLCALL {
+ inlconv2stmt(l.N)
+ }
+ }
+
+ // with all the branches out of the way, it is now time to
+ // transmogrify this node itself unless inhibited by the
+ // switch at the top of this function.
+ switch n.Op {
+ case OCALLFUNC,
+ OCALLMETH:
+ if n.Etype == OPROC || n.Etype == ODEFER {
+ return
+ }
+ }
+
+ switch n.Op {
+ case OCALLFUNC:
+ if Debug['m'] > 3 {
+ fmt.Printf("%v:call to func %v\n", n.Line(), Nconv(n.Left, obj.FmtSign))
+ }
+ if n.Left.Inl != nil { // normal case
+ mkinlcall(np, n.Left, int(n.Isddd))
+ } else if n.Left.Op == ONAME && n.Left.Left != nil && n.Left.Left.Op == OTYPE && n.Left.Right != nil && n.Left.Right.Op == ONAME { // methods called as functions
+ if n.Left.Sym.Def != nil {
+ mkinlcall(np, n.Left.Sym.Def, int(n.Isddd))
+ }
+ }
+
+ case OCALLMETH:
+ if Debug['m'] > 3 {
+ fmt.Printf("%v:call to meth %v\n", n.Line(), Nconv(n.Left.Right, obj.FmtLong))
+ }
+
+ // typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
+ if n.Left.Type == nil {
+ Fatal("no function type for [%p] %v\n", n.Left, Nconv(n.Left, obj.FmtSign))
+ }
+
+ if n.Left.Type.Nname == nil {
+ Fatal("no function definition for [%p] %v\n", n.Left.Type, Tconv(n.Left.Type, obj.FmtSign))
+ }
+
+ mkinlcall(np, n.Left.Type.Nname, int(n.Isddd))
+ }
+
+ lineno = int32(lno)
+}
+
+func mkinlcall(np **Node, fn *Node, isddd int) {
+ var save_safemode int
+ var pkg *Pkg
+
+ save_safemode = safemode
+
+ // imported functions may refer to unsafe as long as the
+ // package was marked safe during import (already checked).
+ pkg = fnpkg(fn)
+
+ if pkg != localpkg && pkg != nil {
+ safemode = 0
+ }
+ mkinlcall1(np, fn, isddd)
+ safemode = save_safemode
+}
+
+func tinlvar(t *Type) *Node {
+ if t.Nname != nil && !isblank(t.Nname) {
+ if !(t.Nname.Inlvar != nil) {
+ Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
+ }
+ return t.Nname.Inlvar
+ }
+
+ typecheck(&nblank, Erv|Easgn)
+ return nblank
+}
+
+var inlgen int
+
+// if *np is a call, and fn is a function with an inlinable body, substitute *np with an OINLCALL.
+// On return ninit has the parameter assignments, the nbody is the
+// inlined function body and list, rlist contain the input, output
+// parameters.
+func mkinlcall1(np **Node, fn *Node, isddd int) {
+ var i int
+ var chkargcount int
+ var n *Node
+ var call *Node
+ var saveinlfn *Node
+ var as *Node
+ var m *Node
+ var dcl *NodeList
+ var ll *NodeList
+ var ninit *NodeList
+ var body *NodeList
+ var t *Type
+ var variadic int
+ var varargcount int
+ var multiret int
+ var vararg *Node
+ var varargs *NodeList
+ var varargtype *Type
+ var vararrtype *Type
+
+ if fn.Inl == nil {
+ return
+ }
+
+ if fn == Curfn || fn.Defn == Curfn {
+ return
+ }
+
+ if Debug['l'] < 2 {
+ typecheckinl(fn)
+ }
+
+ n = *np
+
+ // Bingo, we have a function node, and it has an inlineable body
+ if Debug['m'] > 1 {
+ fmt.Printf("%v: inlining call to %v %v { %v }\n", n.Line(), Sconv(fn.Sym, 0), Tconv(fn.Type, obj.FmtSharp), Hconv(fn.Inl, obj.FmtSharp))
+ } else if Debug['m'] != 0 {
+ fmt.Printf("%v: inlining call to %v\n", n.Line(), Nconv(fn, 0))
+ }
+
+ if Debug['m'] > 2 {
+ fmt.Printf("%v: Before inlining: %v\n", n.Line(), Nconv(n, obj.FmtSign))
+ }
+
+ saveinlfn = inlfn
+ inlfn = fn
+
+ ninit = n.Ninit
+
+ //dumplist("ninit pre", ninit);
+
+ if fn.Defn != nil { // local function
+ dcl = fn.Inldcl // imported function
+ } else {
+ dcl = fn.Dcl
+ }
+
+ inlretvars = nil
+ i = 0
+
+ // Make temp names to use instead of the originals
+ for ll = dcl; ll != nil; ll = ll.Next {
+ if ll.N.Class == PPARAMOUT { // return values handled below.
+ continue
+ }
+ if ll.N.Op == ONAME {
+ ll.N.Inlvar = inlvar(ll.N)
+
+ // Typecheck because inlvar is not necessarily a function parameter.
+ typecheck(&ll.N.Inlvar, Erv)
+
+ if ll.N.Class&^PHEAP != PAUTO {
+ ninit = list(ninit, Nod(ODCL, ll.N.Inlvar, nil)) // otherwise gen won't emit the allocations for heapallocs
+ }
+ }
+ }
+
+ // temporaries for return values.
+ for t = getoutargx(fn.Type).Type; t != nil; t = t.Down {
+ if t != nil && t.Nname != nil && !isblank(t.Nname) {
+ m = inlvar(t.Nname)
+ typecheck(&m, Erv)
+ t.Nname.Inlvar = m
+ } else {
+ // anonymous return values, synthesize names for use in assignment that replaces return
+ m = retvar(t, i)
+ i++
+ }
+
+ ninit = list(ninit, Nod(ODCL, m, nil))
+ inlretvars = list(inlretvars, m)
+ }
+
+ // assign receiver.
+ if fn.Type.Thistuple != 0 && n.Left.Op == ODOTMETH {
+ // method call with a receiver.
+ t = getthisx(fn.Type).Type
+
+ if t != nil && t.Nname != nil && !isblank(t.Nname) && !(t.Nname.Inlvar != nil) {
+ Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
+ }
+ if !(n.Left.Left != nil) {
+ Fatal("method call without receiver: %v", Nconv(n, obj.FmtSign))
+ }
+ if t == nil {
+ Fatal("method call unknown receiver type: %v", Nconv(n, obj.FmtSign))
+ }
+ as = Nod(OAS, tinlvar(t), n.Left.Left)
+ if as != nil {
+ typecheck(&as, Etop)
+ ninit = list(ninit, as)
+ }
+ }
+
+ // check if inlined function is variadic.
+ variadic = 0
+
+ varargtype = nil
+ varargcount = 0
+ for t = fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+ if t.Isddd != 0 {
+ variadic = 1
+ varargtype = t.Type
+ }
+ }
+
+	// but if the argument is dotted too, forget about variadicity.
+ if variadic != 0 && isddd != 0 {
+ variadic = 0
+ }
+
+ // check if argument is actually a returned tuple from call.
+ multiret = 0
+
+ if n.List != nil && !(n.List.Next != nil) {
+ switch n.List.N.Op {
+ case OCALL,
+ OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH:
+ if n.List.N.Left.Type.Outtuple > 1 {
+ multiret = n.List.N.Left.Type.Outtuple - 1
+ }
+ }
+ }
+
+ if variadic != 0 {
+ varargcount = count(n.List) + multiret
+ if n.Left.Op != ODOTMETH {
+ varargcount -= fn.Type.Thistuple
+ }
+ varargcount -= fn.Type.Intuple - 1
+ }
+
+ // assign arguments to the parameters' temp names
+ as = Nod(OAS2, nil, nil)
+
+ as.Rlist = n.List
+ ll = n.List
+
+ // TODO: if len(nlist) == 1 but multiple args, check that n->list->n is a call?
+ if fn.Type.Thistuple != 0 && n.Left.Op != ODOTMETH {
+ // non-method call to method
+ if !(n.List != nil) {
+ Fatal("non-method call to method without first arg: %v", Nconv(n, obj.FmtSign))
+ }
+
+ // append receiver inlvar to LHS.
+ t = getthisx(fn.Type).Type
+
+ if t != nil && t.Nname != nil && !isblank(t.Nname) && !(t.Nname.Inlvar != nil) {
+ Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
+ }
+ if t == nil {
+ Fatal("method call unknown receiver type: %v", Nconv(n, obj.FmtSign))
+ }
+ as.List = list(as.List, tinlvar(t))
+ ll = ll.Next // track argument count.
+ }
+
+ // append ordinary arguments to LHS.
+ chkargcount = bool2int(n.List != nil && n.List.Next != nil)
+
+ vararg = nil // the slice argument to a variadic call
+ varargs = nil // the list of LHS names to put in vararg.
+ if !(chkargcount != 0) {
+ // 0 or 1 expression on RHS.
+ for t = getinargx(fn.Type).Type; t != nil; t = t.Down {
+ if variadic != 0 && t.Isddd != 0 {
+ vararg = tinlvar(t)
+ for i = 0; i < varargcount && ll != nil; i++ {
+ m = argvar(varargtype, i)
+ varargs = list(varargs, m)
+ as.List = list(as.List, m)
+ }
+
+ break
+ }
+
+ as.List = list(as.List, tinlvar(t))
+ }
+ } else {
+ // match arguments except final variadic (unless the call is dotted itself)
+ for t = getinargx(fn.Type).Type; t != nil; {
+ if !(ll != nil) {
+ break
+ }
+ if variadic != 0 && t.Isddd != 0 {
+ break
+ }
+ as.List = list(as.List, tinlvar(t))
+ t = t.Down
+ ll = ll.Next
+ }
+
+ // match varargcount arguments with variadic parameters.
+ if variadic != 0 && t != nil && t.Isddd != 0 {
+ vararg = tinlvar(t)
+ for i = 0; i < varargcount && ll != nil; i++ {
+ m = argvar(varargtype, i)
+ varargs = list(varargs, m)
+ as.List = list(as.List, m)
+ ll = ll.Next
+ }
+
+ if i == varargcount {
+ t = t.Down
+ }
+ }
+
+ if ll != nil || t != nil {
+ Fatal("arg count mismatch: %v vs %v\n", Tconv(getinargx(fn.Type), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+ }
+ }
+
+ if as.Rlist != nil {
+ typecheck(&as, Etop)
+ ninit = list(ninit, as)
+ }
+
+ // turn the variadic args into a slice.
+ if variadic != 0 {
+ as = Nod(OAS, vararg, nil)
+ if !(varargcount != 0) {
+ as.Right = nodnil()
+ as.Right.Type = varargtype
+ } else {
+ vararrtype = typ(TARRAY)
+ vararrtype.Type = varargtype.Type
+ vararrtype.Bound = int64(varargcount)
+
+ as.Right = Nod(OCOMPLIT, nil, typenod(varargtype))
+ as.Right.List = varargs
+ as.Right = Nod(OSLICE, as.Right, Nod(OKEY, nil, nil))
+ }
+
+ typecheck(&as, Etop)
+ ninit = list(ninit, as)
+ }
+
+ // zero the outparams
+ for ll = inlretvars; ll != nil; ll = ll.Next {
+ as = Nod(OAS, ll.N, nil)
+ typecheck(&as, Etop)
+ ninit = list(ninit, as)
+ }
+
+ inlretlabel = newlabel_inl()
+ inlgen++
+ body = inlsubstlist(fn.Inl)
+
+	body = list(body, Nod(OGOTO, inlretlabel, nil)) // avoid 'not used' when function doesn't have a return
+ body = list(body, Nod(OLABEL, inlretlabel, nil))
+
+ typechecklist(body, Etop)
+
+ //dumplist("ninit post", ninit);
+
+ call = Nod(OINLCALL, nil, nil)
+
+ call.Ninit = ninit
+ call.Nbody = body
+ call.Rlist = inlretvars
+ call.Type = n.Type
+ call.Typecheck = 1
+
+ setlno(call, int(n.Lineno))
+
+ //dumplist("call body", body);
+
+ *np = call
+
+ inlfn = saveinlfn
+
+ // transitive inlining
+ // TODO do this pre-expansion on fn->inl directly. requires
+	// either supporting exporting statements with complex ninits
+ // or saving inl and making inlinl
+ if Debug['l'] >= 5 {
+ body = fn.Inl
+ fn.Inl = nil // prevent infinite recursion
+ inlnodelist(call.Nbody)
+ for ll = call.Nbody; ll != nil; ll = ll.Next {
+ if ll.N.Op == OINLCALL {
+ inlconv2stmt(ll.N)
+ }
+ }
+ fn.Inl = body
+ }
+
+ if Debug['m'] > 2 {
+ fmt.Printf("%v: After inlining %v\n\n", n.Line(), Nconv(*np, obj.FmtSign))
+ }
+}
+
+// Every time we expand a function we generate a new set of tmpnames,
+// PAUTOs in the calling function, and link them off of the
+// PPARAMs, PAUTOs and PPARAMOUTs of the called function.
+func inlvar(var_ *Node) *Node {
+ var n *Node
+
+ if Debug['m'] > 3 {
+ fmt.Printf("inlvar %v\n", Nconv(var_, obj.FmtSign))
+ }
+
+ n = newname(var_.Sym)
+ n.Type = var_.Type
+ n.Class = PAUTO
+ n.Used = 1
+ n.Curfn = Curfn // the calling function, not the called one
+ n.Addrtaken = var_.Addrtaken
+
+	// Esc pass won't run if we're inlining into an iface wrapper.
+ // Luckily, we can steal the results from the target func.
+ // If inlining a function defined in another package after
+ // escape analysis is done, treat all local vars as escaping.
+ // See issue 9537.
+ if var_.Esc == EscHeap || (inl_nonlocal != 0 && var_.Op == ONAME) {
+ addrescapes(n)
+ }
+
+ Curfn.Dcl = list(Curfn.Dcl, n)
+ return n
+}
+
+// Synthesize a variable to store the inlined function's results in.
+func retvar(t *Type, i int) *Node {
+ var n *Node
+
+ namebuf = fmt.Sprintf("~r%d", i)
+ n = newname(Lookup(namebuf))
+ n.Type = t.Type
+ n.Class = PAUTO
+ n.Used = 1
+ n.Curfn = Curfn // the calling function, not the called one
+ Curfn.Dcl = list(Curfn.Dcl, n)
+ return n
+}
+
+// Synthesize a variable to store the inlined function's arguments
+// when they come from a multiple return call.
+func argvar(t *Type, i int) *Node {
+ var n *Node
+
+ namebuf = fmt.Sprintf("~arg%d", i)
+ n = newname(Lookup(namebuf))
+ n.Type = t.Type
+ n.Class = PAUTO
+ n.Used = 1
+ n.Curfn = Curfn // the calling function, not the called one
+ Curfn.Dcl = list(Curfn.Dcl, n)
+ return n
+}
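+
+// The synthesized names thus look like (illustrative):
+//
+//	~r0, ~r1, ...     // results of the inlined call
+//	~arg0, ~arg1, ... // variadic args collected from a multi-value call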
+
+var newlabel_inl_label int
+
+func newlabel_inl() *Node {
+ var n *Node
+
+ newlabel_inl_label++
+ namebuf = fmt.Sprintf(".inlret%.6d", newlabel_inl_label)
+ n = newname(Lookup(namebuf))
+ n.Etype = 1 // flag 'safe' for escape analysis (no backjumps)
+ return n
+}
+
+// inlsubst and inlsubstlist recursively copy the body of the saved
+// pristine ->inl body of the function while substituting references
+// to input/output parameters with ones to the tmpnames, and
+// substituting returns with assignments to the output.
+func inlsubstlist(ll *NodeList) *NodeList {
+ var l *NodeList
+
+ l = nil
+ for ; ll != nil; ll = ll.Next {
+ l = list(l, inlsubst(ll.N))
+ }
+ return l
+}
+
+func inlsubst(n *Node) *Node {
+ var p string
+ var m *Node
+ var as *Node
+ var ll *NodeList
+
+ if n == nil {
+ return nil
+ }
+
+ switch n.Op {
+ case ONAME:
+ if n.Inlvar != nil { // These will be set during inlnode
+ if Debug['m'] > 2 {
+ fmt.Printf("substituting name %v -> %v\n", Nconv(n, obj.FmtSign), Nconv(n.Inlvar, obj.FmtSign))
+ }
+ return n.Inlvar
+ }
+
+ if Debug['m'] > 2 {
+ fmt.Printf("not substituting name %v\n", Nconv(n, obj.FmtSign))
+ }
+ return n
+
+ case OLITERAL,
+ OTYPE:
+ return n
+
+ // Since we don't handle bodies with closures, this return is guaranteed to belong to the current inlined function.
+
+ // dump("Return before substitution", n);
+ case ORETURN:
+ m = Nod(OGOTO, inlretlabel, nil)
+
+ m.Ninit = inlsubstlist(n.Ninit)
+
+ if inlretvars != nil && n.List != nil {
+ as = Nod(OAS2, nil, nil)
+
+			// shallow copy, or else OINLCALL->rlist will be the same list, and later walk and typecheck may clobber it.
+ for ll = inlretvars; ll != nil; ll = ll.Next {
+ as.List = list(as.List, ll.N)
+ }
+ as.Rlist = inlsubstlist(n.List)
+ typecheck(&as, Etop)
+ m.Ninit = list(m.Ninit, as)
+ }
+
+ typechecklist(m.Ninit, Etop)
+ typecheck(&m, Etop)
+
+ // dump("Return after substitution", m);
+ return m
+
+ case OGOTO,
+ OLABEL:
+ m = Nod(OXXX, nil, nil)
+ *m = *n
+ m.Ninit = nil
+ p = fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen)
+ m.Left = newname(Lookup(p))
+
+ return m
+ }
+
+ m = Nod(OXXX, nil, nil)
+ *m = *n
+ m.Ninit = nil
+
+ if n.Op == OCLOSURE {
+ Fatal("cannot inline function containing closure: %v", Nconv(n, obj.FmtSign))
+ }
+
+ m.Left = inlsubst(n.Left)
+ m.Right = inlsubst(n.Right)
+ m.List = inlsubstlist(n.List)
+ m.Rlist = inlsubstlist(n.Rlist)
+ m.Ninit = concat(m.Ninit, inlsubstlist(n.Ninit))
+ m.Ntest = inlsubst(n.Ntest)
+ m.Nincr = inlsubst(n.Nincr)
+ m.Nbody = inlsubstlist(n.Nbody)
+ m.Nelse = inlsubstlist(n.Nelse)
+
+ return m
+}
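+
+// The net effect on a return inside an inlined body is roughly (result
+// variable and label names illustrative):
+//
+//	return x + y
+//
+// becomes
+//
+//	~r0 = x + y
+//	goto .inlret000001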
+
+// Plaster over line numbers.
+func setlnolist(ll *NodeList, lno int) {
+ for ; ll != nil; ll = ll.Next {
+ setlno(ll.N, lno)
+ }
+}
+
+func setlno(n *Node, lno int) {
+ if !(n != nil) {
+ return
+ }
+
+ // don't clobber names, unless they're freshly synthesized
+ if n.Op != ONAME || n.Lineno == 0 {
+ n.Lineno = int32(lno)
+ }
+
+ setlno(n.Left, lno)
+ setlno(n.Right, lno)
+ setlnolist(n.List, lno)
+ setlnolist(n.Rlist, lno)
+ setlnolist(n.Ninit, lno)
+ setlno(n.Ntest, lno)
+ setlno(n.Nincr, lno)
+ setlnolist(n.Nbody, lno)
+ setlnolist(n.Nelse, lno)
+}
diff --git a/src/cmd/internal/gc/lex.go b/src/cmd/internal/gc/lex.go
new file mode 100644
index 0000000000..98cebe8b12
--- /dev/null
+++ b/src/cmd/internal/gc/lex.go
@@ -0,0 +1,3204 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+var yychar_lex int
+
+var yyprev int
+
+var yylast int
+
+var imported_unsafe int
+
+var goos string
+
+var goarch string
+
+var goroot string
+
+// Debug arguments.
+// These can be specified with the -d flag, as in "-d nil"
+// to set the debug_checknil variable. In general the list passed
+// to -d can be comma-separated.
+var debugtab = []struct {
+	name string
+	val  *int
+}{
+	{"nil", &Debug_checknil},
+}
+
+// Our own isdigit, isspace, isalpha, isalnum that take care
+// of EOF and other out of range arguments.
+func yy_isdigit(c int) bool {
+ return c >= 0 && c <= 0xFF && isdigit(c)
+}
+
+func yy_isspace(c int) bool {
+ return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
+
+func yy_isalpha(c int) bool {
+ return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z'
+}
+
+func yy_isalnum(c int) bool {
+ return c >= 0 && c <= 0xFF && isalnum(c)
+}
+
+// Disallow use of isdigit etc.
+
+const (
+ EOF = -1
+)
+
+func usage() {
+ fmt.Printf("usage: %cg [options] file.go...\n", Thearch.Thechar)
+ obj.Flagprint(1)
+ os.Exit(2)
+}
+
+func fault(s int) {
+ // If we've already complained about things
+ // in the program, don't bother complaining
+ // about the seg fault too; let the user clean up
+ // the code and try again.
+ if nsavederrors+nerrors > 0 {
+ errorexit()
+ }
+ Fatal("fault")
+}
+
+func doversion() {
+ var p string
+ var sep string
+
+ p = obj.Expstring()
+ if p == "X:none" {
+ p = ""
+ }
+ sep = ""
+	if p != "" {
+ sep = " "
+ }
+ fmt.Printf("%cg version %s%s%s\n", Thearch.Thechar, obj.Getgoversion(), sep, p)
+ os.Exit(0)
+}
+
+func Main() {
+ var l *NodeList
+ var p string
+
+ // Allow GOARCH=thearch.thestring or GOARCH=thearch.thestringsuffix,
+ // but not other values.
+ p = obj.Getgoarch()
+
+ if !strings.HasPrefix(p, Thearch.Thestring) {
+ log.Fatalf("cannot use %cg with GOARCH=%s", Thearch.Thechar, p)
+ }
+ goarch = p
+
+ Thearch.Linkarchinit()
+ Ctxt = obj.Linknew(Thearch.Thelinkarch)
+ Ctxt.Diag = Yyerror
+ Ctxt.Bso = &bstdout
+ bstdout = *obj.Binitw(os.Stdout)
+
+ localpkg = mkpkg(newstrlit(""))
+ localpkg.Prefix = "\"\""
+
+ // pseudo-package, for scoping
+ builtinpkg = mkpkg(newstrlit("go.builtin"))
+
+ builtinpkg.Prefix = "go.builtin" // not go%2ebuiltin
+
+ // pseudo-package, accessed by import "unsafe"
+ unsafepkg = mkpkg(newstrlit("unsafe"))
+
+ unsafepkg.Name = "unsafe"
+
+ // real package, referred to by generated runtime calls
+ Runtimepkg = mkpkg(newstrlit("runtime"))
+
+ Runtimepkg.Name = "runtime"
+
+ // pseudo-packages used in symbol tables
+ gostringpkg = mkpkg(newstrlit("go.string"))
+
+ gostringpkg.Name = "go.string"
+ gostringpkg.Prefix = "go.string" // not go%2estring
+
+ itabpkg = mkpkg(newstrlit("go.itab"))
+
+ itabpkg.Name = "go.itab"
+ itabpkg.Prefix = "go.itab" // not go%2eitab
+
+ weaktypepkg = mkpkg(newstrlit("go.weak.type"))
+
+ weaktypepkg.Name = "go.weak.type"
+ weaktypepkg.Prefix = "go.weak.type" // not go%2eweak%2etype
+
+ typelinkpkg = mkpkg(newstrlit("go.typelink"))
+ typelinkpkg.Name = "go.typelink"
+ typelinkpkg.Prefix = "go.typelink" // not go%2etypelink
+
+ trackpkg = mkpkg(newstrlit("go.track"))
+
+ trackpkg.Name = "go.track"
+ trackpkg.Prefix = "go.track" // not go%2etrack
+
+ typepkg = mkpkg(newstrlit("type"))
+
+ typepkg.Name = "type"
+
+ goroot = obj.Getgoroot()
+ goos = obj.Getgoos()
+
+ Nacl = goos == "nacl"
+ if Nacl {
+ flag_largemodel = 1
+ }
+
+ outfile = ""
+ obj.Flagcount("+", "compiling runtime", &compiling_runtime)
+ obj.Flagcount("%", "debug non-static initializers", &Debug['%'])
+ obj.Flagcount("A", "for bootstrapping, allow 'any' type", &Debug['A'])
+ obj.Flagcount("B", "disable bounds checking", &Debug['B'])
+ obj.Flagstr("D", "path: set relative path for local imports", &localimport)
+ obj.Flagcount("E", "debug symbol export", &Debug['E'])
+ obj.Flagfn1("I", "dir: add dir to import search path", addidir)
+ obj.Flagcount("K", "debug missing line numbers", &Debug['K'])
+ obj.Flagcount("L", "use full (long) path in error messages", &Debug['L'])
+ obj.Flagcount("M", "debug move generation", &Debug['M'])
+ obj.Flagcount("N", "disable optimizations", &Debug['N'])
+ obj.Flagcount("P", "debug peephole optimizer", &Debug['P'])
+ obj.Flagcount("R", "debug register optimizer", &Debug['R'])
+ obj.Flagcount("S", "print assembly listing", &Debug['S'])
+ obj.Flagfn0("V", "print compiler version", doversion)
+ obj.Flagcount("W", "debug parse tree after type checking", &Debug['W'])
+ obj.Flagstr("asmhdr", "file: write assembly header to named file", &asmhdr)
+ obj.Flagcount("complete", "compiling complete package (no C or assembly)", &pure_go)
+ obj.Flagstr("d", "list: print debug information about items in list", &debugstr)
+ obj.Flagcount("e", "no limit on number of errors reported", &Debug['e'])
+ obj.Flagcount("f", "debug stack frames", &Debug['f'])
+ obj.Flagcount("g", "debug code generation", &Debug['g'])
+ obj.Flagcount("h", "halt on error", &Debug['h'])
+ obj.Flagcount("i", "debug line number stack", &Debug['i'])
+ obj.Flagstr("installsuffix", "pkg directory suffix", &flag_installsuffix)
+ obj.Flagcount("j", "debug runtime-initialized variables", &Debug['j'])
+ obj.Flagcount("l", "disable inlining", &Debug['l'])
+ obj.Flagcount("live", "debug liveness analysis", &debuglive)
+ obj.Flagcount("m", "print optimization decisions", &Debug['m'])
+ obj.Flagcount("nolocalimports", "reject local (relative) imports", &nolocalimports)
+ obj.Flagstr("o", "obj: set output file", &outfile)
+ obj.Flagstr("p", "path: set expected package import path", &myimportpath)
+ obj.Flagcount("pack", "write package file instead of object file", &writearchive)
+ obj.Flagcount("r", "debug generated wrappers", &Debug['r'])
+ obj.Flagcount("race", "enable race detector", &flag_race)
+ obj.Flagcount("s", "warn about composite literals that can be simplified", &Debug['s'])
+ obj.Flagstr("trimpath", "prefix: remove prefix from recorded source file paths", &Ctxt.Trimpath)
+ obj.Flagcount("u", "reject unsafe code", &safemode)
+ obj.Flagcount("v", "increase debug verbosity", &Debug['v'])
+ obj.Flagcount("w", "debug type checking", &Debug['w'])
+ use_writebarrier = 1
+ obj.Flagcount("wb", "enable write barrier", &use_writebarrier)
+ obj.Flagcount("x", "debug lexer", &Debug['x'])
+ obj.Flagcount("y", "debug declarations in canned imports (with -d)", &Debug['y'])
+ if Thearch.Thechar == '6' {
+ obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel)
+ }
+
+ obj.Flagparse(usage)
+ Ctxt.Debugasm = int32(Debug['S'])
+ Ctxt.Debugvlog = int32(Debug['v'])
+
+ if flag.NArg() < 1 {
+ usage()
+ }
+
+ if flag_race != 0 {
+ racepkg = mkpkg(newstrlit("runtime/race"))
+ racepkg.Name = "race"
+ }
+
+ // parse -d argument
+ if debugstr != "" {
+ var j int
+ f := strings.Split(debugstr, ",")
+ for i := range f {
+ if f[i] == "" {
+ continue
+ }
+ for j = 0; j < len(debugtab); j++ {
+ if debugtab[j].name == f[i] {
+ if debugtab[j].val != nil {
+ *debugtab[j].val = 1
+ }
+ break
+ }
+ }
+
+ if j >= len(debugtab) {
+ log.Fatalf("unknown debug information -d '%s'\n", f[i])
+ }
+ }
+ }
+
+ // enable inlining. for now:
+ // default: inlining on. (debug['l'] == 1)
+ // -l: inlining off (debug['l'] == 0)
+ // -ll, -lll: inlining on again, with extra debugging (debug['l'] > 1)
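+	// For example, with no -l flags Debug['l'] is 0 and becomes 1 below
+	// (inlining on); a single -l gives 1 and becomes 0 (inlining off);
+	// -ll and -lll leave Debug['l'] at 2 or 3, untouched by the flip.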
+ if Debug['l'] <= 1 {
+ Debug['l'] = 1 - Debug['l']
+ }
+
+ if Thearch.Thechar == '8' {
+ p = obj.Getgo386()
+ if p == "387" {
+ Use_sse = 0
+ } else if p == "sse2" {
+ Use_sse = 1
+ } else {
+ log.Fatalf("unsupported setting GO386=%s", p)
+ }
+ }
+
+ Thearch.Betypeinit()
+ if Widthptr == 0 {
+ Fatal("betypeinit failed")
+ }
+
+ lexinit()
+ typeinit()
+ lexinit1()
+ // TODO(rsc): Restore yytinit?
+
+ blockgen = 1
+ dclcontext = PEXTERN
+ nerrors = 0
+ lexlineno = 1
+
+ for _, infile = range flag.Args() {
+ linehist(infile, 0, 0)
+
+ curio.infile = infile
+ var err error
+ curio.bin, err = obj.Bopenr(infile)
+ if err != nil {
+ fmt.Printf("open %s: %v\n", infile, err)
+ errorexit()
+ }
+
+ curio.peekc = 0
+ curio.peekc1 = 0
+ curio.nlsemi = 0
+ curio.eofnl = 0
+ curio.last = 0
+
+ // Skip initial BOM if present.
+ if obj.Bgetrune(curio.bin) != obj.BOM {
+ obj.Bungetrune(curio.bin)
+ }
+
+ block = 1
+ iota_ = -1000000
+
+ imported_unsafe = 0
+
+ yyparse()
+ if nsyntaxerrors != 0 {
+ errorexit()
+ }
+
+ linehist("<pop>", 0, 0)
+ if curio.bin != nil {
+ obj.Bterm(curio.bin)
+ }
+ }
+
+ testdclstack()
+ mkpackage(localpkg.Name) // final import not used checks
+ lexfini()
+
+ typecheckok = 1
+ if Debug['f'] != 0 {
+ frame(1)
+ }
+
+ // Process top-level declarations in phases.
+
+ // Phase 1: const, type, and names and types of funcs.
+ // This will gather all the information about types
+ // and methods but doesn't depend on any of it.
+ defercheckwidth()
+
+ for l = xtop; l != nil; l = l.Next {
+ if l.N.Op != ODCL && l.N.Op != OAS {
+ typecheck(&l.N, Etop)
+ }
+ }
+
+ // Phase 2: Variable assignments.
+ // To check interface assignments, depends on phase 1.
+ for l = xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCL || l.N.Op == OAS {
+ typecheck(&l.N, Etop)
+ }
+ }
+ resumecheckwidth()
+
+ // Phase 3: Type check function bodies.
+ for l = xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC || l.N.Op == OCLOSURE {
+ Curfn = l.N
+ decldepth = 1
+ saveerrors()
+ typechecklist(l.N.Nbody, Etop)
+ checkreturn(l.N)
+ if nerrors != 0 {
+ l.N.Nbody = nil // type errors; do not compile
+ }
+ }
+ }
+
+ // Phase 4: Decide how to capture closed variables.
+ // This needs to run before escape analysis,
+ // because variables captured by value do not escape.
+ for l = xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC && l.N.Closure != nil {
+ Curfn = l.N
+ capturevars(l.N)
+ }
+ }
+
+ Curfn = nil
+
+ if nsavederrors+nerrors != 0 {
+ errorexit()
+ }
+
+ // Phase 5: Inlining
+ if Debug['l'] > 1 {
+ // Typecheck imported function bodies if debug['l'] > 1,
+ // otherwise lazily when used or re-exported.
+ for l = importlist; l != nil; l = l.Next {
+ if l.N.Inl != nil {
+ saveerrors()
+ typecheckinl(l.N)
+ }
+ }
+
+ if nsavederrors+nerrors != 0 {
+ errorexit()
+ }
+ }
+
+ if Debug['l'] != 0 {
+ // Find functions that can be inlined and clone them before walk expands them.
+ for l = xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC {
+ caninl(l.N)
+ }
+ }
+
+ // Expand inlineable calls in all functions
+ for l = xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC {
+ inlcalls(l.N)
+ }
+ }
+ }
+
+ // Phase 6: Escape analysis.
+ // Required for moving heap allocations onto stack,
+ // which in turn is required by the closure implementation,
+ // which stores the addresses of stack variables into the closure.
+ // If the closure does not escape, it needs to be on the stack
+ // or else the stack copier will not update it.
+ escapes(xtop)
+
+ // Escape analysis moved escaped values off stack.
+ // Move large values off stack too.
+ movelarge(xtop)
+
+ // Phase 7: Transform closure bodies to properly reference captured variables.
+ // This needs to happen before walk, because closures must be transformed
+ // before walk reaches a call of a closure.
+ for l = xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC && l.N.Closure != nil {
+ Curfn = l.N
+ transformclosure(l.N)
+ }
+ }
+
+ Curfn = nil
+
+ // Phase 8: Compile top level functions.
+ for l = xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC {
+ funccompile(l.N)
+ }
+ }
+
+ if nsavederrors+nerrors == 0 {
+ fninit(xtop)
+ }
+
+ // Phase 9: Check external declarations.
+ for l = externdcl; l != nil; l = l.Next {
+ if l.N.Op == ONAME {
+ typecheck(&l.N, Erv)
+ }
+ }
+
+ if nerrors+nsavederrors != 0 {
+ errorexit()
+ }
+
+ dumpobj()
+
+ if asmhdr != "" {
+ dumpasmhdr()
+ }
+
+ if nerrors+nsavederrors != 0 {
+ errorexit()
+ }
+
+ Flusherrors()
+}
+
+func saveerrors() {
+ nsavederrors += nerrors
+ nerrors = 0
+}
+
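+// arsize reads one 60-byte archive member header from b and, if the
+// member name begins with name, returns the member size parsed from the
+// decimal size field; if the name does not match (or the read fails) it
+// returns -1. The Unix ar header layout is name[0:16] ... size[48:58]
+// magic[58:60], matching the slices below.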
+func arsize(b *obj.Biobuf, name string) int {
+ var buf [ArhdrSize]byte
+ if _, err := io.ReadFull(b, buf[:]); err != nil {
+ return -1
+ }
+ aname := strings.Trim(string(buf[0:16]), " ")
+ if !strings.HasPrefix(aname, name) {
+ return -1
+ }
+ asize := strings.Trim(string(buf[48:58]), " ")
+ i, _ := strconv.Atoi(asize)
+ return i
+}
+
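+// skiptopkgdef positions b at the contents of an archive's __.PKGDEF
+// member (the package export data), skipping the "!<arch>\n" magic and
+// an optional __.GOSYMDEF symbol table. It returns 1 on success and 0
+// if b does not look like a Go package archive.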
+func skiptopkgdef(b *obj.Biobuf) int {
+ var p string
+ var sz int
+
+ /* archive header */
+ p = obj.Brdline(b, '\n')
+ if p == "" {
+ return 0
+ }
+ if obj.Blinelen(b) != 8 {
+ return 0
+ }
+ if p != "!<arch>\n" {
+ return 0
+ }
+
+ /* symbol table may be first; skip it */
+ sz = arsize(b, "__.GOSYMDEF")
+
+ if sz >= 0 {
+ obj.Bseek(b, int64(sz), 1)
+ } else {
+ obj.Bseek(b, 8, 0)
+ }
+
+ /* package export block is next */
+ sz = arsize(b, "__.PKGDEF")
+
+ if sz <= 0 {
+ return 0
+ }
+ return 1
+}
+
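+// addidir appends dir to the tail of the idirs list searched by findpkg
+// (the -I flag).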
+func addidir(dir string) {
+ var pp **Idir
+
+ if dir == "" {
+ return
+ }
+
+ for pp = &idirs; *pp != nil; pp = &(*pp).link {
+ }
+ *pp = new(Idir)
+ (*pp).link = nil
+ (*pp).dir = dir
+}
+
+// is this path a local name? begins with ./ or ../ or /
+func islocalname(name *Strlit) bool {
+ return strings.HasPrefix(name.S, "/") ||
+ Ctxt.Windows != 0 && len(name.S) >= 3 && yy_isalpha(int(name.S[0])) && name.S[1] == ':' && name.S[2] == '/' ||
+ strings.HasPrefix(name.S, "./") || name.S == "." ||
+ strings.HasPrefix(name.S, "../") || name.S == ".."
+}
+
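+// findpkg reports (as 0 or 1) whether the package with the given import
+// path can be located: local paths directly, then the -I directories,
+// then $GOROOT/pkg, trying name.a before the architecture-specific
+// object file. On success the chosen file name is left in namebuf for
+// importfile.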
+func findpkg(name *Strlit) int {
+ var p *Idir
+ var q string
+ var suffix string
+ var suffixsep string
+
+ if islocalname(name) {
+ if safemode != 0 || nolocalimports != 0 {
+ return 0
+ }
+
+ // try .a before .6. important for building libraries:
+ // if there is an array.6 in the array.a library,
+ // want to find all of array.a, not just array.6.
+ namebuf = fmt.Sprintf("%v.a", Zconv(name, 0))
+
+ if obj.Access(namebuf, 0) >= 0 {
+ return 1
+ }
+ namebuf = fmt.Sprintf("%v.%c", Zconv(name, 0), Thearch.Thechar)
+ if obj.Access(namebuf, 0) >= 0 {
+ return 1
+ }
+ return 0
+ }
+
+ // local imports should be canonicalized already.
+ // don't want to see "encoding/../encoding/base64"
+ // as different from "encoding/base64".
+	q = path.Clean(name.S)
+	if q != name.S {
+		Yyerror("non-canonical import path %v (should be %s)", Zconv(name, 0), q)
+		return 0
+	}
+
+ for p = idirs; p != nil; p = p.link {
+ namebuf = fmt.Sprintf("%s/%v.a", p.dir, Zconv(name, 0))
+ if obj.Access(namebuf, 0) >= 0 {
+ return 1
+ }
+ namebuf = fmt.Sprintf("%s/%v.%c", p.dir, Zconv(name, 0), Thearch.Thechar)
+ if obj.Access(namebuf, 0) >= 0 {
+ return 1
+ }
+ }
+
+ if goroot != "" {
+ suffix = ""
+ suffixsep = ""
+ if flag_installsuffix != "" {
+ suffixsep = "_"
+ suffix = flag_installsuffix
+ } else if flag_race != 0 {
+ suffixsep = "_"
+ suffix = "race"
+ }
+
+ namebuf = fmt.Sprintf("%s/pkg/%s_%s%s%s/%v.a", goroot, goos, goarch, suffixsep, suffix, Zconv(name, 0))
+ if obj.Access(namebuf, 0) >= 0 {
+ return 1
+ }
+ namebuf = fmt.Sprintf("%s/pkg/%s_%s%s%s/%v.%c", goroot, goos, goarch, suffixsep, suffix, Zconv(name, 0), Thearch.Thechar)
+ if obj.Access(namebuf, 0) >= 0 {
+ return 1
+ }
+ }
+
+ return 0
+}
+
+func fakeimport() {
+ importpkg = mkpkg(newstrlit("fake"))
+ cannedimports("fake.6", "$$\n")
+}
+
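+// importfile opens the object or archive file for the import path in f
+// and pushes it onto the lexer's input, positioned just after the "$$"
+// marker, so that the parser consumes the package's export data next.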
+func importfile(f *Val, line int) {
+ var imp *obj.Biobuf
+ var file string
+ var p string
+ var q string
+ var tag string
+ var c int32
+ var n int
+ var path_ *Strlit
+ var cleanbuf string
+ var prefix string
+
+ if f.Ctype != CTSTR {
+ Yyerror("import statement not a string")
+ fakeimport()
+ return
+ }
+
+ if len(f.U.Sval.S) == 0 {
+ Yyerror("import path is empty")
+ fakeimport()
+ return
+ }
+
+ if isbadimport(f.U.Sval) {
+ fakeimport()
+ return
+ }
+
+ // The package name main is no longer reserved,
+ // but we reserve the import path "main" to identify
+ // the main package, just as we reserve the import
+ // path "math" to identify the standard math package.
+ if f.U.Sval.S == "main" {
+ Yyerror("cannot import \"main\"")
+ errorexit()
+ }
+
+ if myimportpath != "" && f.U.Sval.S == myimportpath {
+ Yyerror("import \"%v\" while compiling that package (import cycle)", Zconv(f.U.Sval, 0))
+ errorexit()
+ }
+
+ if f.U.Sval.S == "unsafe" {
+ if safemode != 0 {
+ Yyerror("cannot import package unsafe")
+ errorexit()
+ }
+
+ importpkg = mkpkg(f.U.Sval)
+ cannedimports("unsafe.6", unsafeimport)
+ imported_unsafe = 1
+ return
+ }
+
+ path_ = f.U.Sval
+ if islocalname(path_) {
+ if path_.S[0] == '/' {
+ Yyerror("import path cannot be absolute path")
+ fakeimport()
+ return
+ }
+
+ prefix = Ctxt.Pathname
+ if localimport != "" {
+ prefix = localimport
+ }
+ cleanbuf = prefix
+ cleanbuf += "/"
+ cleanbuf += path_.S
+ cleanbuf = path.Clean(cleanbuf)
+ path_ = newstrlit(cleanbuf)
+
+ if isbadimport(path_) {
+ fakeimport()
+ return
+ }
+ }
+
+	if findpkg(path_) == 0 {
+ Yyerror("can't find import: \"%v\"", Zconv(f.U.Sval, 0))
+ errorexit()
+ }
+
+ importpkg = mkpkg(path_)
+
+ // If we already saw that package, feed a dummy statement
+ // to the lexer to avoid parsing export data twice.
+ if importpkg.Imported != 0 {
+ file = namebuf
+ tag = ""
+ if importpkg.Safe {
+ tag = "safe"
+ }
+
+ p = fmt.Sprintf("package %s %s\n$$\n", importpkg.Name, tag)
+ cannedimports(file, p)
+ return
+ }
+
+ importpkg.Imported = 1
+
+ var err error
+ imp, err = obj.Bopenr(namebuf)
+ if err != nil {
+ Yyerror("can't open import: \"%v\": %v", Zconv(f.U.Sval, 0), err)
+ errorexit()
+ }
+
+ file = namebuf
+
+ n = len(namebuf)
+ if n > 2 && namebuf[n-2] == '.' && namebuf[n-1] == 'a' {
+		if skiptopkgdef(imp) == 0 {
+ Yyerror("import %s: not a package file", file)
+ errorexit()
+ }
+ }
+
+ // check object header
+ p = obj.Brdstr(imp, '\n', 1)
+
+ if p != "empty archive" {
+ if !strings.HasPrefix(p, "go object ") {
+ Yyerror("import %s: not a go object file", file)
+ errorexit()
+ }
+
+ q = fmt.Sprintf("%s %s %s %s", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+ if p[10:] != q {
+ Yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q)
+ errorexit()
+ }
+ }
+
+ // assume files move (get installed)
+ // so don't record the full path.
+ linehist(file[n-len(path_.S)-2:], -1, 1) // acts as #pragma lib
+
+ /*
+ * position the input right
+ * after $$ and return
+ */
+ pushedio = curio
+
+ curio.bin = imp
+ curio.peekc = 0
+ curio.peekc1 = 0
+ curio.infile = file
+ curio.nlsemi = 0
+ typecheckok = 1
+
+ for {
+ c = int32(getc())
+ if c == EOF {
+ break
+ }
+ if c != '$' {
+ continue
+ }
+ c = int32(getc())
+ if c == EOF {
+ break
+ }
+ if c != '$' {
+ continue
+ }
+ return
+ }
+
+ Yyerror("no import in \"%v\"", Zconv(f.U.Sval, 0))
+ unimportfile()
+}
+
+func unimportfile() {
+ if curio.bin != nil {
+ obj.Bterm(curio.bin)
+ curio.bin = nil
+ } else {
+		lexlineno-- // undo the lexlineno++ done by cannedimports
+ }
+
+ curio = pushedio
+
+ pushedio.bin = nil
+ incannedimport = 0
+ typecheckok = 0
+}
+
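+// cannedimports pushes the literal string cp onto the lexer's input as
+// if it were the contents of file; it is used for the built-in unsafe
+// package and other synthesized export data.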
+func cannedimports(file string, cp string) {
+	lexlineno++ // if sys.6 is included on line 1, account for the extra line
+
+ pushedio = curio
+
+ curio.bin = nil
+ curio.peekc = 0
+ curio.peekc1 = 0
+ curio.infile = file
+ curio.cp = cp
+ curio.nlsemi = 0
+ curio.importsafe = false
+
+ typecheckok = 1
+ incannedimport = 1
+}
+
+func isfrog(c int) bool {
+ // complain about possibly invisible control characters
+ if c < ' ' {
+ return !yy_isspace(c) // exclude good white space
+ }
+
+ if 0x7f <= c && c <= 0xa0 { // DEL, unicode block including unbreakable space.
+ return true
+ }
+ return false
+}
+
+type Loophack struct {
+ v int
+ next *Loophack
+}
+
+var _yylex_lstk *Loophack
+
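+// _yylex scans and returns the next token, leaving any literal value in
+// yylval. Single-character tokens are returned as themselves; keywords,
+// multi-character operators, and literals use the L* token codes.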
+func _yylex(yylval *yySymType) int32 {
+ var c int
+ var c1 int
+ var escflag int
+ var v int64
+ var cp *bytes.Buffer
+ var rune_ uint
+ var s *Sym
+ var h *Loophack
+
+ prevlineno = lineno
+
+l0:
+ c = getc()
+ if yy_isspace(c) {
+ if c == '\n' && curio.nlsemi != 0 {
+ ungetc(c)
+ DBG("lex: implicit semi\n")
+ return ';'
+ }
+
+ goto l0
+ }
+
+ lineno = lexlineno /* start of token */
+
+ if c >= utf8.RuneSelf {
+ /* all multibyte runes are alpha */
+ cp = &lexbuf
+ cp.Reset()
+
+ goto talph
+ }
+
+ if yy_isalpha(c) {
+ cp = &lexbuf
+ cp.Reset()
+ goto talph
+ }
+
+ if yy_isdigit(c) {
+ goto tnum
+ }
+
+ switch c {
+ case EOF:
+ lineno = prevlineno
+ ungetc(EOF)
+ return -1
+
+ case '_':
+ cp = &lexbuf
+ cp.Reset()
+ goto talph
+
+ case '.':
+ c1 = getc()
+ if yy_isdigit(c1) {
+ cp = &lexbuf
+ cp.Reset()
+ cp.WriteByte(byte(c))
+ c = c1
+ goto casedot
+ }
+
+ if c1 == '.' {
+ c1 = getc()
+ if c1 == '.' {
+ c = LDDD
+ goto lx
+ }
+
+ ungetc(c1)
+ c1 = '.'
+ }
+
+ /* "..." */
+ case '"':
+ lexbuf.Reset()
+ lexbuf.WriteString(`"<string>"`)
+
+ cp = &strbuf
+ cp.Reset()
+
+ for {
+
+ if escchar('"', &escflag, &v) != 0 {
+ break
+ }
+ if v < utf8.RuneSelf || escflag != 0 {
+ cp.WriteByte(byte(v))
+ } else {
+ rune_ = uint(v)
+ cp.WriteRune(rune(rune_))
+ }
+ }
+
+ goto strlit
+
+ /* `...` */
+ case '`':
+ lexbuf.Reset()
+ lexbuf.WriteString("`<string>`")
+
+ cp = &strbuf
+ cp.Reset()
+
+ for {
+
+ c = int(getr())
+ if c == '\r' {
+ continue
+ }
+ if c == EOF {
+ Yyerror("eof in string")
+ break
+ }
+
+ if c == '`' {
+ break
+ }
+ cp.WriteRune(rune(c))
+ }
+
+ goto strlit
+
+ /* '.' */
+ case '\'':
+ if escchar('\'', &escflag, &v) != 0 {
+ Yyerror("empty character literal or unescaped ' in character literal")
+ v = '\''
+ }
+
+		if escchar('\'', &escflag, &v) == 0 {
+ Yyerror("missing '")
+ ungetc(int(v))
+ }
+
+ yylval.val.U.Xval = new(Mpint)
+ Mpmovecfix(yylval.val.U.Xval, v)
+ yylval.val.Ctype = CTRUNE
+ DBG("lex: codepoint literal\n")
+ litbuf = "string literal"
+ return LLITERAL
+
+ case '/':
+ c1 = getc()
+ if c1 == '*' {
+ var nl int
+
+ nl = 0
+ for {
+ c = int(getr())
+ if c == '\n' {
+ nl = 1
+ }
+ for c == '*' {
+ c = int(getr())
+ if c == '/' {
+ if nl != 0 {
+ ungetc('\n')
+ }
+ goto l0
+ }
+
+ if c == '\n' {
+ nl = 1
+ }
+ }
+
+ if c == EOF {
+ Yyerror("eof in comment")
+ errorexit()
+ }
+ }
+ }
+
+ if c1 == '/' {
+ c = getlinepragma()
+ for {
+ if c == '\n' || c == EOF {
+ ungetc(c)
+ goto l0
+ }
+
+ c = int(getr())
+ }
+ }
+
+ if c1 == '=' {
+ c = ODIV
+ goto asop
+ }
+
+ case ':':
+ c1 = getc()
+ if c1 == '=' {
+ c = LCOLAS
+ yylval.i = int(lexlineno)
+ goto lx
+ }
+
+ case '*':
+ c1 = getc()
+ if c1 == '=' {
+ c = OMUL
+ goto asop
+ }
+
+ case '%':
+ c1 = getc()
+ if c1 == '=' {
+ c = OMOD
+ goto asop
+ }
+
+ case '+':
+ c1 = getc()
+ if c1 == '+' {
+ c = LINC
+ goto lx
+ }
+
+ if c1 == '=' {
+ c = OADD
+ goto asop
+ }
+
+ case '-':
+ c1 = getc()
+ if c1 == '-' {
+ c = LDEC
+ goto lx
+ }
+
+ if c1 == '=' {
+ c = OSUB
+ goto asop
+ }
+
+ case '>':
+ c1 = getc()
+ if c1 == '>' {
+ c = LRSH
+ c1 = getc()
+ if c1 == '=' {
+ c = ORSH
+ goto asop
+ }
+
+ break
+ }
+
+ if c1 == '=' {
+ c = LGE
+ goto lx
+ }
+
+ c = LGT
+
+ case '<':
+ c1 = getc()
+ if c1 == '<' {
+ c = LLSH
+ c1 = getc()
+ if c1 == '=' {
+ c = OLSH
+ goto asop
+ }
+
+ break
+ }
+
+ if c1 == '=' {
+ c = LLE
+ goto lx
+ }
+
+ if c1 == '-' {
+ c = LCOMM
+ goto lx
+ }
+
+ c = LLT
+
+ case '=':
+ c1 = getc()
+ if c1 == '=' {
+ c = LEQ
+ goto lx
+ }
+
+ case '!':
+ c1 = getc()
+ if c1 == '=' {
+ c = LNE
+ goto lx
+ }
+
+ case '&':
+ c1 = getc()
+ if c1 == '&' {
+ c = LANDAND
+ goto lx
+ }
+
+ if c1 == '^' {
+ c = LANDNOT
+ c1 = getc()
+ if c1 == '=' {
+ c = OANDNOT
+ goto asop
+ }
+
+ break
+ }
+
+ if c1 == '=' {
+ c = OAND
+ goto asop
+ }
+
+ case '|':
+ c1 = getc()
+ if c1 == '|' {
+ c = LOROR
+ goto lx
+ }
+
+ if c1 == '=' {
+ c = OOR
+ goto asop
+ }
+
+ case '^':
+ c1 = getc()
+ if c1 == '=' {
+ c = OXOR
+ goto asop
+ }
+
+ /*
+ * clumsy dance:
+ * to implement rule that disallows
+ * if T{1}[0] { ... }
+ * but allows
+ * if (T{1}[0]) { ... }
+ * the block bodies for if/for/switch/select
+ * begin with an LBODY token, not '{'.
+ *
+ * when we see the keyword, the next
+ * non-parenthesized '{' becomes an LBODY.
+ * loophack is normally 0.
+ * a keyword makes it go up to 1.
+ * parens push loophack onto a stack and go back to 0.
+ * a '{' with loophack == 1 becomes LBODY and disables loophack.
+ *
+ * i said it was clumsy.
+ */
+ case '(',
+ '[':
+ if loophack != 0 || _yylex_lstk != nil {
+			h = new(Loophack) // new cannot return nil in Go
+ h.v = loophack
+ h.next = _yylex_lstk
+ _yylex_lstk = h
+ loophack = 0
+ }
+
+ goto lx
+
+ case ')',
+ ']':
+ if _yylex_lstk != nil {
+ h = _yylex_lstk
+ loophack = h.v
+ _yylex_lstk = h.next
+ }
+
+ goto lx
+
+ case '{':
+ if loophack == 1 {
+ DBG("%L lex: LBODY\n", lexlineno)
+ loophack = 0
+ return LBODY
+ }
+
+ goto lx
+
+ default:
+ goto lx
+ }
+
+ ungetc(c1)
+
+lx:
+ if c > 0xff {
+ DBG("%L lex: TOKEN %s\n", lexlineno, lexname(c))
+ } else {
+ DBG("%L lex: TOKEN '%c'\n", lexlineno, c)
+ }
+ if isfrog(c) {
+ Yyerror("illegal character 0x%x", uint(c))
+ goto l0
+ }
+
+ if importpkg == nil && (c == '#' || c == '$' || c == '?' || c == '@' || c == '\\') {
+ Yyerror("%s: unexpected %c", "syntax error", c)
+ goto l0
+ }
+
+ return int32(c)
+
+asop:
+ yylval.i = c // rathole to hold which asop
+ DBG("lex: TOKEN ASOP %c\n", c)
+ return LASOP
+
+ /*
+ * cp is set to lexbuf and some
+ * prefix has been stored
+ */
+talph:
+ for {
+
+ if c >= utf8.RuneSelf {
+ ungetc(c)
+ rune_ = uint(getr())
+
+ // 0xb7 · is used for internal names
+ if !unicode.IsLetter(rune(rune_)) && !unicode.IsDigit(rune(rune_)) && (importpkg == nil || rune_ != 0xb7) {
+ Yyerror("invalid identifier character U+%04x", rune_)
+ }
+ cp.WriteRune(rune(rune_))
+ } else if !yy_isalnum(c) && c != '_' {
+ break
+ } else {
+ cp.WriteByte(byte(c))
+ }
+ c = getc()
+ }
+
+ cp = nil
+ ungetc(c)
+
+ s = Lookup(lexbuf.String())
+ switch s.Lexical {
+ case LIGNORE:
+ goto l0
+
+ case LFOR,
+ LIF,
+ LSWITCH,
+ LSELECT:
+ loophack = 1 // see comment about loophack above
+ }
+
+ DBG("lex: %S %s\n", s, lexname(int(s.Lexical)))
+ yylval.sym = s
+ return int32(s.Lexical)
+
+tnum:
+ cp = &lexbuf
+ cp.Reset()
+ if c != '0' {
+ for {
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ if yy_isdigit(c) {
+ continue
+ }
+ goto dc
+ }
+ }
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ if c == 'x' || c == 'X' {
+ for {
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ if yy_isdigit(c) {
+ continue
+ }
+ if c >= 'a' && c <= 'f' {
+ continue
+ }
+ if c >= 'A' && c <= 'F' {
+ continue
+ }
+ if lexbuf.Len() == 2 {
+ Yyerror("malformed hex constant")
+ }
+ if c == 'p' {
+ goto caseep
+ }
+ goto ncu
+ }
+ }
+
+ if c == 'p' { // 0p begins floating point zero
+ goto caseep
+ }
+
+ c1 = 0
+ for {
+
+ if !yy_isdigit(c) {
+ break
+ }
+ if c < '0' || c > '7' {
+ c1 = 1 // not octal
+ }
+ cp.WriteByte(byte(c))
+ c = getc()
+ }
+
+ if c == '.' {
+ goto casedot
+ }
+ if c == 'e' || c == 'E' {
+ goto caseep
+ }
+ if c == 'i' {
+ goto casei
+ }
+ if c1 != 0 {
+ Yyerror("malformed octal constant")
+ }
+ goto ncu
+
+dc:
+ if c == '.' {
+ goto casedot
+ }
+ if c == 'e' || c == 'E' || c == 'p' || c == 'P' {
+ goto caseep
+ }
+ if c == 'i' {
+ goto casei
+ }
+
+ncu:
+ cp = nil
+ ungetc(c)
+
+ yylval.val.U.Xval = new(Mpint)
+ mpatofix(yylval.val.U.Xval, lexbuf.String())
+ if yylval.val.U.Xval.Ovf != 0 {
+ Yyerror("overflow in constant")
+ Mpmovecfix(yylval.val.U.Xval, 0)
+ }
+
+ yylval.val.Ctype = CTINT
+ DBG("lex: integer literal\n")
+ litbuf = "literal "
+ litbuf += lexbuf.String()
+ return LLITERAL
+
+casedot:
+ for {
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ if !yy_isdigit(c) {
+ break
+ }
+ }
+
+ if c == 'i' {
+ goto casei
+ }
+ if c != 'e' && c != 'E' {
+ goto caseout
+ }
+
+caseep:
+ cp.WriteByte(byte(c))
+ c = getc()
+ if c == '+' || c == '-' {
+ cp.WriteByte(byte(c))
+ c = getc()
+ }
+
+ if !yy_isdigit(c) {
+ Yyerror("malformed fp constant exponent")
+ }
+ for yy_isdigit(c) {
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ }
+
+ if c == 'i' {
+ goto casei
+ }
+ goto caseout
+
+ // imaginary constant
+casei:
+ cp = nil
+
+ yylval.val.U.Cval = new(Mpcplx)
+ Mpmovecflt(&yylval.val.U.Cval.Real, 0.0)
+ mpatoflt(&yylval.val.U.Cval.Imag, lexbuf.String())
+ if yylval.val.U.Cval.Imag.Val.Ovf != 0 {
+ Yyerror("overflow in imaginary constant")
+ Mpmovecflt(&yylval.val.U.Cval.Real, 0.0)
+ }
+
+ yylval.val.Ctype = CTCPLX
+ DBG("lex: imaginary literal\n")
+ litbuf = "literal "
+ litbuf += lexbuf.String()
+ return LLITERAL
+
+caseout:
+ cp = nil
+ ungetc(c)
+
+ yylval.val.U.Fval = new(Mpflt)
+ mpatoflt(yylval.val.U.Fval, lexbuf.String())
+ if yylval.val.U.Fval.Val.Ovf != 0 {
+ Yyerror("overflow in float constant")
+ Mpmovecflt(yylval.val.U.Fval, 0.0)
+ }
+
+ yylval.val.Ctype = CTFLT
+ DBG("lex: floating literal\n")
+ litbuf = "literal "
+ litbuf += lexbuf.String()
+ return LLITERAL
+
+strlit:
+ yylval.val.U.Sval = &Strlit{S: cp.String()}
+ yylval.val.Ctype = CTSTR
+ DBG("lex: string literal\n")
+ litbuf = "string literal"
+ return LLITERAL
+}
+
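+// more skips leading white space in *pp and reports whether any text remains.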
+func more(pp *string) bool {
+ p := *pp
+ for p != "" && yy_isspace(int(p[0])) {
+ p = p[1:]
+ }
+ *pp = p
+ return p != ""
+}
+
+/*
+ * read and interpret syntax that looks like
+ * //line parse.y:15
+ * as a discontinuity in sequential line numbers.
+ * the next line of input comes from parse.y:15
+ *
+ * also recognizes the //go: pragmas (go:linkname, go:nointerface,
+ * go:noescape, go:nosplit, go:nowritebarrier) and forwards //go:cgo_*
+ * directives to pragcgo.
+ */
+func getlinepragma() int {
+ var cmd, verb, name string
+ var i int
+ var c int
+ var n int
+ var cp *bytes.Buffer
+ var linep int
+ var h *obj.Hist
+
+ c = int(getr())
+ if c == 'g' {
+ goto go_
+ }
+ if c != 'l' {
+ goto out
+ }
+ for i = 1; i < 5; i++ {
+ c = int(getr())
+ if c != int("line "[i]) {
+ goto out
+ }
+ }
+
+ cp = &lexbuf
+ cp.Reset()
+ linep = 0
+ for {
+ c = int(getr())
+ if c == EOF {
+ goto out
+ }
+ if c == '\n' {
+ break
+ }
+ if c == ' ' {
+ continue
+ }
+ if c == ':' {
+ linep = cp.Len() + 1
+ }
+ cp.WriteByte(byte(c))
+ }
+
+ cp = nil
+
+ if linep == 0 {
+ goto out
+ }
+ n = 0
+ for _, c := range lexbuf.String()[linep:] {
+ if c < '0' || c > '9' {
+ goto out
+ }
+ n = n*10 + int(c) - '0'
+ if n > 1e8 {
+ Yyerror("line number out of range")
+ errorexit()
+ }
+ }
+
+ if n <= 0 {
+ goto out
+ }
+
+ // try to avoid allocating file name over and over
+ name = lexbuf.String()[:linep-1]
+ for h = Ctxt.Hist; h != nil; h = h.Link {
+ if h.Name != "" && h.Name == name {
+ linehist(h.Name, int32(n), 0)
+ goto out
+ }
+ }
+
+ linehist(name, int32(n), 0)
+ goto out
+
+go_:
+ cp = &lexbuf
+ cp.Reset()
+ cp.WriteByte('g') // already read
+ for {
+ c = int(getr())
+ if c == EOF || c >= utf8.RuneSelf {
+ goto out
+ }
+ if c == '\n' {
+ break
+ }
+ cp.WriteByte(byte(c))
+ }
+
+ cp = nil
+
+ if strings.HasPrefix(lexbuf.String(), "go:cgo_") {
+ pragcgo(lexbuf.String())
+ }
+
+ cmd = lexbuf.String()
+ verb = cmd
+ if i := strings.Index(verb, " "); i >= 0 {
+ verb = verb[:i]
+ }
+
+ if verb == "go:linkname" {
+		if imported_unsafe == 0 {
+ Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
+ }
+ f := strings.Fields(cmd)
+ if len(f) != 3 {
+ Yyerror("usage: //go:linkname localname linkname")
+ goto out
+ }
+
+ Lookup(f[1]).Linkname = f[2]
+ goto out
+ }
+
+ if verb == "go:nointerface" && obj.Fieldtrack_enabled != 0 {
+ nointerface = true
+ goto out
+ }
+
+ if verb == "go:noescape" {
+ noescape = true
+ goto out
+ }
+
+ if verb == "go:nosplit" {
+ nosplit = true
+ goto out
+ }
+
+ if verb == "go:nowritebarrier" {
+		if compiling_runtime == 0 {
+ Yyerror("//go:nowritebarrier only allowed in runtime")
+ }
+ nowritebarrier = true
+ goto out
+ }
+
+out:
+ return c
+}
+
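+// getimpsym scans the next white-space-delimited, unquoted symbol from *pp.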
+func getimpsym(pp *string) string {
+ more(pp) // skip spaces
+ p := *pp
+ if p == "" || p[0] == '"' {
+ return ""
+ }
+ i := 0
+ for i < len(p) && !yy_isspace(int(p[i])) && p[i] != '"' {
+ i++
+ }
+ sym := p[:i]
+ *pp = p[i:]
+ return sym
+}
+
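+// getquoted scans a double-quoted string from *pp, reporting whether one was found.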
+func getquoted(pp *string) (string, bool) {
+ more(pp) // skip spaces
+ p := *pp
+ if p == "" || p[0] != '"' {
+ return "", false
+ }
+ p = p[1:]
+ i := strings.Index(p, `"`)
+ if i < 0 {
+ return "", false
+ }
+ *pp = p[i+1:]
+ return p[:i], true
+}
+
+// Copied nearly verbatim from the C compiler's #pragma parser.
+// TODO: Rewrite more cleanly once the compiler is written in Go.
+func pragcgo(text string) {
+ var local string
+ var remote string
+ var p string
+ var q string
+ var verb string
+
+ if i := strings.Index(text, " "); i >= 0 {
+ text, q = text[:i], text[i:]
+ }
+
+ verb = text[3:] // skip "go:"
+
+ if verb == "cgo_dynamic_linker" || verb == "dynlinker" {
+ var ok bool
+ p, ok = getquoted(&q)
+ if !ok {
+ goto err1
+ }
+ pragcgobuf += fmt.Sprintf("cgo_dynamic_linker %v\n", plan9quote(p))
+ goto out
+
+ err1:
+ Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
+ goto out
+ }
+
+ if verb == "dynexport" {
+ verb = "cgo_export_dynamic"
+ }
+ if verb == "cgo_export_static" || verb == "cgo_export_dynamic" {
+ local = getimpsym(&q)
+ if local == "" {
+ goto err2
+ }
+ if !more(&q) {
+ pragcgobuf += fmt.Sprintf("%s %v\n", verb, plan9quote(local))
+ goto out
+ }
+
+ remote = getimpsym(&q)
+ if remote == "" {
+ goto err2
+ }
+ pragcgobuf += fmt.Sprintf("%s %v %v\n", verb, plan9quote(local), plan9quote(remote))
+ goto out
+
+ err2:
+ Yyerror("usage: //go:%s local [remote]", verb)
+ goto out
+ }
+
+ if verb == "cgo_import_dynamic" || verb == "dynimport" {
+ var ok bool
+ local = getimpsym(&q)
+ if local == "" {
+ goto err3
+ }
+ if !more(&q) {
+ pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v\n", plan9quote(local))
+ goto out
+ }
+
+ remote = getimpsym(&q)
+ if remote == "" {
+ goto err3
+ }
+ if !more(&q) {
+ pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v\n", plan9quote(local), plan9quote(remote))
+ goto out
+ }
+
+ p, ok = getquoted(&q)
+ if !ok {
+ goto err3
+ }
+ pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v %v\n", plan9quote(local), plan9quote(remote), plan9quote(p))
+ goto out
+
+ err3:
+ Yyerror("usage: //go:cgo_import_dynamic local [remote [\"library\"]]")
+ goto out
+ }
+
+ if verb == "cgo_import_static" {
+ local = getimpsym(&q)
+ if local == "" || more(&q) {
+ goto err4
+ }
+ pragcgobuf += fmt.Sprintf("cgo_import_static %v\n", plan9quote(local))
+ goto out
+
+ err4:
+ Yyerror("usage: //go:cgo_import_static local")
+ goto out
+ }
+
+ if verb == "cgo_ldflag" {
+ var ok bool
+ p, ok = getquoted(&q)
+ if !ok {
+ goto err5
+ }
+ pragcgobuf += fmt.Sprintf("cgo_ldflag %v\n", plan9quote(p))
+ goto out
+
+ err5:
+ Yyerror("usage: //go:cgo_ldflag \"arg\"")
+ goto out
+ }
+
+out:
+	return // a label must be followed by a statement
+}
+
+type yy struct{}
+
+var yymsg []struct {
+ yystate, yychar int
+ msg string
+}
+
+func (yy) Lex(v *yySymType) int {
+ return int(yylex(v))
+}
+
+func (yy) Error(msg string) {
+ Yyerror("%s", msg)
+}
+
+func yyparse() {
+ yyParse(yy{})
+}
+
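+// yylex wraps _yylex to implement automatic semicolon insertion: after a
+// token that can end a statement (an identifier, literal, break/continue/
+// fallthrough/return, ++/--, or a closing bracket), a following newline
+// or EOF is delivered to the parser as ';'.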
+func yylex(yylval *yySymType) int32 {
+ var lx int
+
+ lx = int(_yylex(yylval))
+
+ if curio.nlsemi != 0 && lx == EOF {
+ // Treat EOF as "end of line" for the purposes
+ // of inserting a semicolon.
+ lx = ';'
+ }
+
+ switch lx {
+ case LNAME,
+ LLITERAL,
+ LBREAK,
+ LCONTINUE,
+ LFALL,
+ LRETURN,
+ LINC,
+ LDEC,
+ ')',
+ '}',
+ ']':
+ curio.nlsemi = 1
+
+ default:
+ curio.nlsemi = 0
+ }
+
+ // Track last two tokens returned by yylex.
+ yyprev = yylast
+
+ yylast = lx
+ return int32(lx)
+}
+
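+// getc returns the next input byte: pushed-back characters first, then
+// the current canned-import string (where a NUL marks the end), then the
+// open input file. It complains about NUL bytes and mid-file UTF-8 BOMs,
+// synthesizes a final '\n' at EOF, and keeps lexlineno in sync.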
+func getc() int {
+ var c int
+ var c1 int
+ var c2 int
+
+ c = curio.peekc
+ if c != 0 {
+ curio.peekc = curio.peekc1
+ curio.peekc1 = 0
+ goto check
+ }
+
+ if curio.bin == nil {
+ if len(curio.cp) == 0 {
+ c = 0
+ } else {
+ c = int(curio.cp[0])
+ curio.cp = curio.cp[1:]
+ }
+ } else {
+ loop:
+ c = obj.Bgetc(curio.bin)
+ if c == 0xef {
+ c1 = obj.Bgetc(curio.bin)
+ c2 = obj.Bgetc(curio.bin)
+ if c1 == 0xbb && c2 == 0xbf {
+ yyerrorl(int(lexlineno), "Unicode (UTF-8) BOM in middle of file")
+ goto loop
+ }
+
+ obj.Bungetc(curio.bin)
+ obj.Bungetc(curio.bin)
+ }
+ }
+
+check:
+ switch c {
+ case 0:
+ if curio.bin != nil {
+ Yyerror("illegal NUL byte")
+ break
+ }
+ fallthrough
+
+ // insert \n at EOF
+ case EOF:
+ if curio.eofnl != 0 || curio.last == '\n' {
+ return EOF
+ }
+ curio.eofnl = 1
+ c = '\n'
+ fallthrough
+
+ case '\n':
+ if pushedio.bin == nil {
+ lexlineno++
+ }
+ }
+
+ curio.last = c
+ return c
+}
+
+func ungetc(c int) {
+ curio.peekc1 = curio.peekc
+ curio.peekc = c
+ if c == '\n' && pushedio.bin == nil {
+ lexlineno--
+ }
+}
+
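+// getr returns the next input rune, assembling multi-byte UTF-8
+// sequences from getc and complaining about illegal encodings.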
+func getr() int32 {
+ var buf [utf8.UTFMax]byte
+
+ for i := 0; ; i++ {
+ c := getc()
+ if i == 0 && c < utf8.RuneSelf {
+ return int32(c)
+ }
+ buf[i] = byte(c)
+ if i+1 == len(buf) || utf8.FullRune(buf[:i+1]) {
+ r, w := utf8.DecodeRune(buf[:i+1])
+ if r == utf8.RuneError && w == 1 {
+ lineno = lexlineno
+ Yyerror("illegal UTF-8 sequence % x", buf[:i+1])
+ }
+ return int32(r)
+ }
+ }
+}
+
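+// escchar reads one character of a rune or string literal delimited by e,
+// decoding backslash escapes. It returns 1 at the closing delimiter (or on
+// a lexical error) and 0 otherwise, leaving the decoded value in *val.
+// *escflg is set to 1 for byte escapes (\xFF, \377), whose values must be
+// stored as raw bytes rather than re-encoded as UTF-8.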
+func escchar(e int, escflg *int, val *int64) int {
+ var i int
+ var u int
+ var c int
+ var l int64
+
+ *escflg = 0
+
+ c = int(getr())
+ switch c {
+ case EOF:
+ Yyerror("eof in string")
+ return 1
+
+ case '\n':
+ Yyerror("newline in string")
+ return 1
+
+ case '\\':
+ break
+
+ default:
+ if c == e {
+ return 1
+ }
+ *val = int64(c)
+ return 0
+ }
+
+ u = 0
+ c = int(getr())
+ switch c {
+ case 'x':
+ *escflg = 1 // it's a byte
+ i = 2
+ goto hex
+
+ case 'u':
+ i = 4
+ u = 1
+ goto hex
+
+ case 'U':
+ i = 8
+ u = 1
+ goto hex
+
+ case '0',
+ '1',
+ '2',
+ '3',
+ '4',
+ '5',
+ '6',
+ '7':
+ *escflg = 1 // it's a byte
+ goto oct
+
+ case 'a':
+ c = '\a'
+ case 'b':
+ c = '\b'
+ case 'f':
+ c = '\f'
+ case 'n':
+ c = '\n'
+ case 'r':
+ c = '\r'
+ case 't':
+ c = '\t'
+ case 'v':
+ c = '\v'
+ case '\\':
+ c = '\\'
+
+ default:
+ if c != e {
+ Yyerror("unknown escape sequence: %c", c)
+ }
+ }
+
+ *val = int64(c)
+ return 0
+
+hex:
+ l = 0
+ for ; i > 0; i-- {
+ c = getc()
+ if c >= '0' && c <= '9' {
+ l = l*16 + int64(c) - '0'
+ continue
+ }
+
+ if c >= 'a' && c <= 'f' {
+ l = l*16 + int64(c) - 'a' + 10
+ continue
+ }
+
+ if c >= 'A' && c <= 'F' {
+ l = l*16 + int64(c) - 'A' + 10
+ continue
+ }
+
+ Yyerror("non-hex character in escape sequence: %c", c)
+ ungetc(c)
+ break
+ }
+
+ if u != 0 && (l > utf8.MaxRune || (0xd800 <= l && l < 0xe000)) {
+ Yyerror("invalid Unicode code point in escape sequence: %#x", l)
+ l = utf8.RuneError
+ }
+
+ *val = l
+ return 0
+
+oct:
+ l = int64(c) - '0'
+ for i = 2; i > 0; i-- {
+ c = getc()
+ if c >= '0' && c <= '7' {
+ l = l*8 + int64(c) - '0'
+ continue
+ }
+
+ Yyerror("non-octal character in escape sequence: %c", c)
+ ungetc(c)
+ }
+
+ if l > 255 {
+ Yyerror("octal escape value > 255: %d", l)
+ }
+
+ *val = l
+ return 0
+}
+
+var syms = []struct {
+ name string
+ lexical int
+ etype int
+ op int
+}{
+	/* name, lexical, etype, op */
+
+	/* basic types */
+	{"int8", LNAME, TINT8, OXXX},
+	{"int16", LNAME, TINT16, OXXX},
+	{"int32", LNAME, TINT32, OXXX},
+	{"int64", LNAME, TINT64, OXXX},
+	{"uint8", LNAME, TUINT8, OXXX},
+	{"uint16", LNAME, TUINT16, OXXX},
+	{"uint32", LNAME, TUINT32, OXXX},
+	{"uint64", LNAME, TUINT64, OXXX},
+	{"float32", LNAME, TFLOAT32, OXXX},
+	{"float64", LNAME, TFLOAT64, OXXX},
+	{"complex64", LNAME, TCOMPLEX64, OXXX},
+	{"complex128", LNAME, TCOMPLEX128, OXXX},
+	{"bool", LNAME, TBOOL, OXXX},
+	{"string", LNAME, TSTRING, OXXX},
+	{"any", LNAME, TANY, OXXX},
+	{"break", LBREAK, Txxx, OXXX},
+	{"case", LCASE, Txxx, OXXX},
+	{"chan", LCHAN, Txxx, OXXX},
+	{"const", LCONST, Txxx, OXXX},
+	{"continue", LCONTINUE, Txxx, OXXX},
+	{"default", LDEFAULT, Txxx, OXXX},
+	{"else", LELSE, Txxx, OXXX},
+	{"defer", LDEFER, Txxx, OXXX},
+	{"fallthrough", LFALL, Txxx, OXXX},
+	{"for", LFOR, Txxx, OXXX},
+	{"func", LFUNC, Txxx, OXXX},
+	{"go", LGO, Txxx, OXXX},
+	{"goto", LGOTO, Txxx, OXXX},
+	{"if", LIF, Txxx, OXXX},
+	{"import", LIMPORT, Txxx, OXXX},
+	{"interface", LINTERFACE, Txxx, OXXX},
+	{"map", LMAP, Txxx, OXXX},
+	{"package", LPACKAGE, Txxx, OXXX},
+	{"range", LRANGE, Txxx, OXXX},
+	{"return", LRETURN, Txxx, OXXX},
+	{"select", LSELECT, Txxx, OXXX},
+	{"struct", LSTRUCT, Txxx, OXXX},
+	{"switch", LSWITCH, Txxx, OXXX},
+	{"type", LTYPE, Txxx, OXXX},
+	{"var", LVAR, Txxx, OXXX},
+	{"append", LNAME, Txxx, OAPPEND},
+	{"cap", LNAME, Txxx, OCAP},
+	{"close", LNAME, Txxx, OCLOSE},
+	{"complex", LNAME, Txxx, OCOMPLEX},
+	{"copy", LNAME, Txxx, OCOPY},
+	{"delete", LNAME, Txxx, ODELETE},
+	{"imag", LNAME, Txxx, OIMAG},
+	{"len", LNAME, Txxx, OLEN},
+	{"make", LNAME, Txxx, OMAKE},
+	{"new", LNAME, Txxx, ONEW},
+	{"panic", LNAME, Txxx, OPANIC},
+	{"print", LNAME, Txxx, OPRINT},
+	{"println", LNAME, Txxx, OPRINTN},
+	{"real", LNAME, Txxx, OREAL},
+	{"recover", LNAME, Txxx, ORECOVER},
+	{"notwithstanding", LIGNORE, Txxx, OXXX},
+	{"thetruthofthematter", LIGNORE, Txxx, OXXX},
+	{"despiteallobjections", LIGNORE, Txxx, OXXX},
+	{"whereas", LIGNORE, Txxx, OXXX},
+	{"insofaras", LIGNORE, Txxx, OXXX},
+}
+
+func lexinit() {
+ var i int
+ var lex int
+ var s *Sym
+ var s1 *Sym
+ var t *Type
+ var etype int
+ var v Val
+
+ /*
+ * initialize basic types array
+ * initialize known symbols
+ */
+ for i = 0; i < len(syms); i++ {
+ lex = syms[i].lexical
+ s = Lookup(syms[i].name)
+ s.Lexical = uint16(lex)
+
+ etype = syms[i].etype
+ if etype != Txxx {
+ if etype < 0 || etype >= len(Types) {
+ Fatal("lexinit: %s bad etype", s.Name)
+ }
+ s1 = Pkglookup(syms[i].name, builtinpkg)
+ t = Types[etype]
+ if t == nil {
+ t = typ(etype)
+ t.Sym = s1
+
+ if etype != TANY && etype != TSTRING {
+ dowidth(t)
+ }
+ Types[etype] = t
+ }
+
+ s1.Lexical = LNAME
+ s1.Def = typenod(t)
+ continue
+ }
+
+ etype = syms[i].op
+ if etype != OXXX {
+ s1 = Pkglookup(syms[i].name, builtinpkg)
+ s1.Lexical = LNAME
+ s1.Def = Nod(ONAME, nil, nil)
+ s1.Def.Sym = s1
+ s1.Def.Etype = uint8(etype)
+ s1.Def.Builtin = 1
+ }
+ }
+
+ // logically, the type of a string literal.
+ // types[TSTRING] is the named type string
+ // (the type of x in var x string or var x = "hello").
+ // this is the ideal form
+ // (the type of x in const x = "hello").
+ idealstring = typ(TSTRING)
+
+ idealbool = typ(TBOOL)
+
+ s = Pkglookup("true", builtinpkg)
+ s.Def = Nodbool(1)
+ s.Def.Sym = Lookup("true")
+ s.Def.Type = idealbool
+
+ s = Pkglookup("false", builtinpkg)
+ s.Def = Nodbool(0)
+ s.Def.Sym = Lookup("false")
+ s.Def.Type = idealbool
+
+ s = Lookup("_")
+ s.Block = -100
+ s.Def = Nod(ONAME, nil, nil)
+ s.Def.Sym = s
+ Types[TBLANK] = typ(TBLANK)
+ s.Def.Type = Types[TBLANK]
+ nblank = s.Def
+
+ s = Pkglookup("_", builtinpkg)
+ s.Block = -100
+ s.Def = Nod(ONAME, nil, nil)
+ s.Def.Sym = s
+ Types[TBLANK] = typ(TBLANK)
+ s.Def.Type = Types[TBLANK]
+
+ Types[TNIL] = typ(TNIL)
+ s = Pkglookup("nil", builtinpkg)
+ v.Ctype = CTNIL
+ s.Def = nodlit(v)
+ s.Def.Sym = s
+}
+
+func lexinit1() {
+ var s *Sym
+ var s1 *Sym
+ var t *Type
+ var f *Type
+ var rcvr *Type
+ var in *Type
+ var out *Type
+
+ // t = interface { Error() string }
+ rcvr = typ(TSTRUCT)
+
+ rcvr.Type = typ(TFIELD)
+ rcvr.Type.Type = Ptrto(typ(TSTRUCT))
+ rcvr.Funarg = 1
+ in = typ(TSTRUCT)
+ in.Funarg = 1
+ out = typ(TSTRUCT)
+ out.Type = typ(TFIELD)
+ out.Type.Type = Types[TSTRING]
+ out.Funarg = 1
+ f = typ(TFUNC)
+ *getthis(f) = rcvr
+ *Getoutarg(f) = out
+ *getinarg(f) = in
+ f.Thistuple = 1
+ f.Intuple = 0
+ f.Outnamed = 0
+ f.Outtuple = 1
+ t = typ(TINTER)
+ t.Type = typ(TFIELD)
+ t.Type.Sym = Lookup("Error")
+ t.Type.Type = f
+
+ // error type
+ s = Lookup("error")
+
+ s.Lexical = LNAME
+ s1 = Pkglookup("error", builtinpkg)
+ errortype = t
+ errortype.Sym = s1
+ s1.Lexical = LNAME
+ s1.Def = typenod(errortype)
+
+ // byte alias
+ s = Lookup("byte")
+
+ s.Lexical = LNAME
+ s1 = Pkglookup("byte", builtinpkg)
+ bytetype = typ(TUINT8)
+ bytetype.Sym = s1
+ s1.Lexical = LNAME
+ s1.Def = typenod(bytetype)
+
+ // rune alias
+ s = Lookup("rune")
+
+ s.Lexical = LNAME
+ s1 = Pkglookup("rune", builtinpkg)
+ runetype = typ(TINT32)
+ runetype.Sym = s1
+ s1.Lexical = LNAME
+ s1.Def = typenod(runetype)
+}
+
+func lexfini() {
+ var s *Sym
+ var lex int
+ var etype int
+ var i int
+ var v Val
+
+ for i = 0; i < len(syms); i++ {
+ lex = syms[i].lexical
+ if lex != LNAME {
+ continue
+ }
+ s = Lookup(syms[i].name)
+ s.Lexical = uint16(lex)
+
+ etype = syms[i].etype
+ if etype != Txxx && (etype != TANY || Debug['A'] != 0) && s.Def == nil {
+ s.Def = typenod(Types[etype])
+ s.Origpkg = builtinpkg
+ }
+
+ etype = syms[i].op
+ if etype != OXXX && s.Def == nil {
+ s.Def = Nod(ONAME, nil, nil)
+ s.Def.Sym = s
+ s.Def.Etype = uint8(etype)
+ s.Def.Builtin = 1
+ s.Origpkg = builtinpkg
+ }
+ }
+
+ // backend-specific builtin types (e.g. int).
+ for i = range Thearch.Typedefs {
+ s = Lookup(Thearch.Typedefs[i].Name)
+ if s.Def == nil {
+ s.Def = typenod(Types[Thearch.Typedefs[i].Etype])
+ s.Origpkg = builtinpkg
+ }
+ }
+
+ // there's only so much table-driven we can handle.
+ // these are special cases.
+ s = Lookup("byte")
+
+ if s.Def == nil {
+ s.Def = typenod(bytetype)
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("error")
+ if s.Def == nil {
+ s.Def = typenod(errortype)
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("rune")
+ if s.Def == nil {
+ s.Def = typenod(runetype)
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("nil")
+ if s.Def == nil {
+ v.Ctype = CTNIL
+ s.Def = nodlit(v)
+ s.Def.Sym = s
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("iota")
+ if s.Def == nil {
+ s.Def = Nod(OIOTA, nil, nil)
+ s.Def.Sym = s
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("true")
+ if s.Def == nil {
+ s.Def = Nodbool(1)
+ s.Def.Sym = s
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("false")
+ if s.Def == nil {
+ s.Def = Nodbool(0)
+ s.Def.Sym = s
+ s.Origpkg = builtinpkg
+ }
+
+ nodfp = Nod(ONAME, nil, nil)
+ nodfp.Type = Types[TINT32]
+ nodfp.Xoffset = 0
+ nodfp.Class = PPARAM
+ nodfp.Sym = Lookup(".fp")
+}
+
+var lexn = []struct {
+ lex int
+ name string
+}{
+	{LANDAND, "ANDAND"},
+	{LANDNOT, "ANDNOT"},
+	{LASOP, "ASOP"},
+	{LBREAK, "BREAK"},
+	{LCASE, "CASE"},
+	{LCHAN, "CHAN"},
+	{LCOLAS, "COLAS"},
+	{LCOMM, "<-"},
+	{LCONST, "CONST"},
+	{LCONTINUE, "CONTINUE"},
+	{LDDD, "..."},
+	{LDEC, "DEC"},
+	{LDEFAULT, "DEFAULT"},
+	{LDEFER, "DEFER"},
+	{LELSE, "ELSE"},
+	{LEQ, "EQ"},
+	{LFALL, "FALL"},
+	{LFOR, "FOR"},
+	{LFUNC, "FUNC"},
+	{LGE, "GE"},
+	{LGO, "GO"},
+	{LGOTO, "GOTO"},
+	{LGT, "GT"},
+	{LIF, "IF"},
+	{LIMPORT, "IMPORT"},
+	{LINC, "INC"},
+	{LINTERFACE, "INTERFACE"},
+	{LLE, "LE"},
+	{LLITERAL, "LITERAL"},
+	{LLSH, "LSH"},
+	{LLT, "LT"},
+	{LMAP, "MAP"},
+	{LNAME, "NAME"},
+	{LNE, "NE"},
+	{LOROR, "OROR"},
+	{LPACKAGE, "PACKAGE"},
+	{LRANGE, "RANGE"},
+	{LRETURN, "RETURN"},
+	{LRSH, "RSH"},
+	{LSELECT, "SELECT"},
+	{LSTRUCT, "STRUCT"},
+	{LSWITCH, "SWITCH"},
+	{LTYPE, "TYPE"},
+	{LVAR, "VAR"},
+}
+
+func lexname(lex int) string {
+	for i := 0; i < len(lexn); i++ {
+		if lexn[i].lex == lex {
+			return lexn[i].name
+		}
+	}
+	return fmt.Sprintf("LEX-%d", lex)
+}
+
+var yytfix = []struct {
+ have string
+ want string
+}{
+	{"$end", "EOF"},
+	{"LLITERAL", "literal"},
+	{"LASOP", "op="},
+	{"LBREAK", "break"},
+	{"LCASE", "case"},
+	{"LCHAN", "chan"},
+	{"LCOLAS", ":="},
+	{"LCONST", "const"},
+	{"LCONTINUE", "continue"},
+	{"LDDD", "..."},
+	{"LDEFAULT", "default"},
+	{"LDEFER", "defer"},
+	{"LELSE", "else"},
+	{"LFALL", "fallthrough"},
+	{"LFOR", "for"},
+	{"LFUNC", "func"},
+	{"LGO", "go"},
+	{"LGOTO", "goto"},
+	{"LIF", "if"},
+	{"LIMPORT", "import"},
+	{"LINTERFACE", "interface"},
+	{"LMAP", "map"},
+	{"LNAME", "name"},
+	{"LPACKAGE", "package"},
+	{"LRANGE", "range"},
+	{"LRETURN", "return"},
+	{"LSELECT", "select"},
+	{"LSTRUCT", "struct"},
+	{"LSWITCH", "switch"},
+	{"LTYPE", "type"},
+	{"LVAR", "var"},
+	{"LANDAND", "&&"},
+	{"LANDNOT", "&^"},
+	{"LBODY", "{"},
+	{"LCOMM", "<-"},
+	{"LDEC", "--"},
+	{"LINC", "++"},
+	{"LEQ", "=="},
+	{"LGE", ">="},
+	{"LGT", ">"},
+	{"LLE", "<="},
+	{"LLT", "<"},
+	{"LLSH", "<<"},
+	{"LRSH", ">>"},
+	{"LOROR", "||"},
+	{"LNE", "!="},
+
+	// spell out to avoid confusion with punctuation in error messages
+	{"';'", "semicolon or newline"},
+	{"','", "comma"},
+}
+
+func pkgnotused(lineno int, path_ *Strlit, name string) {
+ var elem string
+
+ // If the package was imported with a name other than the final
+ // import path element, show it explicitly in the error message.
+ // Note that this handles both renamed imports and imports of
+ // packages containing unconventional package declarations.
+ // Note that this uses / always, even on Windows, because Go import
+ // paths always use forward slashes.
+ elem = path_.S
+ if i := strings.LastIndex(elem, "/"); i >= 0 {
+ elem = elem[i+1:]
+ }
+ if name == "" || elem == name {
+ yyerrorl(int(lineno), "imported and not used: \"%v\"", Zconv(path_, 0))
+ } else {
+ yyerrorl(int(lineno), "imported and not used: \"%v\" as %s", Zconv(path_, 0), name)
+ }
+}
+
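+// mkpackage records (or, for files after the first, checks) the package
+// name, discards top-level bindings left over from the previous file,
+// and derives a default output file name from infile when -o is not set.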
+func mkpackage(pkgname string) {
+ var s *Sym
+ var h int32
+ var p string
+
+ if localpkg.Name == "" {
+ if pkgname == "_" {
+ Yyerror("invalid package name _")
+ }
+ localpkg.Name = pkgname
+ } else {
+ if pkgname != localpkg.Name {
+ Yyerror("package %s; expected %s", pkgname, localpkg.Name)
+ }
+ for h = 0; h < NHASH; h++ {
+ for s = hash[h]; s != nil; s = s.Link {
+ if s.Def == nil || s.Pkg != localpkg {
+ continue
+ }
+				if s.Def.Op == OPACK {
+					// throw away top-level package name leftover
+					// from previous file.
+					// leave s.Block set to cause redeclaration
+					// errors if a conflicting top-level name is
+					// introduced by a different file.
+					if s.Def.Used == 0 && nsyntaxerrors == 0 {
+ pkgnotused(int(s.Def.Lineno), s.Def.Pkg.Path, s.Name)
+ }
+ s.Def = nil
+ continue
+ }
+
+ if s.Def.Sym != s {
+ // throw away top-level name left over
+ // from previous import . "x"
+					if s.Def.Pack != nil && s.Def.Pack.Used == 0 && nsyntaxerrors == 0 {
+ pkgnotused(int(s.Def.Pack.Lineno), s.Def.Pack.Pkg.Path, "")
+ s.Def.Pack.Used = 1
+ }
+
+ s.Def = nil
+ continue
+ }
+ }
+ }
+ }
+
+ if outfile == "" {
+ p = infile
+ if i := strings.LastIndex(p, "/"); i >= 0 {
+ p = p[i+1:]
+ }
+ if Ctxt.Windows != 0 {
+ if i := strings.LastIndex(p, `\`); i >= 0 {
+ p = p[i+1:]
+ }
+ }
+ namebuf = p
+ if i := strings.LastIndex(namebuf, "."); i >= 0 {
+ namebuf = namebuf[:i]
+ }
+ outfile = fmt.Sprintf("%s.%c", namebuf, Thearch.Thechar)
+ }
+}
diff --git a/src/cmd/internal/gc/md5.go b/src/cmd/internal/gc/md5.go
new file mode 100644
index 0000000000..862fdd553e
--- /dev/null
+++ b/src/cmd/internal/gc/md5.go
@@ -0,0 +1,329 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+// 64-bit MD5 (does full MD5 but returns 64 bits only).
+// Translation of ../../crypto/md5/md5*.go.
+
+type MD5 struct {
+ s [4]uint32
+ x [64]uint8
+ nx int
+ len uint64
+}
+
+const (
+ _Chunk = 64
+)
+
+const (
+ _Init0 = 0x67452301
+ _Init1 = 0xEFCDAB89
+ _Init2 = 0x98BADCFE
+ _Init3 = 0x10325476
+)
+
+func md5reset(d *MD5) {
+ d.s[0] = _Init0
+ d.s[1] = _Init1
+ d.s[2] = _Init2
+ d.s[3] = _Init3
+ d.nx = 0
+ d.len = 0
+}
+
+func md5write(d *MD5, p []byte, nn int) {
+ var i int
+ var n int
+
+ d.len += uint64(nn)
+ if d.nx > 0 {
+ n = nn
+ if n > _Chunk-d.nx {
+ n = _Chunk - d.nx
+ }
+ for i = 0; i < n; i++ {
+ d.x[d.nx+i] = uint8(p[i])
+ }
+ d.nx += n
+ if d.nx == _Chunk {
+ md5block(d, d.x[:], _Chunk)
+ d.nx = 0
+ }
+
+ p = p[n:]
+ nn -= n
+ }
+
+ n = md5block(d, p, nn)
+ p = p[n:]
+ nn -= n
+ if nn > 0 {
+ for i = 0; i < nn; i++ {
+ d.x[i] = uint8(p[i])
+ }
+ d.nx = nn
+ }
+}
+
+func md5sum(d *MD5, hi *uint64) uint64 {
+ var tmp [64]uint8
+ var i int
+ var len uint64
+
+ // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
+ len = d.len
+
+	tmp[0] = 0x80 // tmp starts zeroed by its declaration
+ if len%64 < 56 {
+ md5write(d, tmp[:], int(56-len%64))
+ } else {
+ md5write(d, tmp[:], int(64+56-len%64))
+ }
+
+ // Length in bits.
+ len <<= 3
+
+ for i = 0; i < 8; i++ {
+ tmp[i] = uint8(len >> uint(8*i))
+ }
+ md5write(d, tmp[:], 8)
+
+ if d.nx != 0 {
+ Fatal("md5sum")
+ }
+
+ if hi != nil {
+ *hi = uint64(d.s[2]) | uint64(d.s[3])<<32
+ }
+ return uint64(d.s[0]) | uint64(d.s[1])<<32
+}
+
+// MD5 block step.
+// In its own file so that a faster assembly or C version
+// can be substituted easily.
+
+// table[i] = int((1<<32) * abs(sin(i+1 radians))).
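+// (For reference, a sketch of the generator:
+//	for i := range table {
+//		table[i] = uint32(math.Abs(math.Sin(float64(i+1))) * (1 << 32))
+//	}
+// assuming "math" were imported; the values below are precomputed.)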
+var table = [64]uint32{
+ // round 1
+ 0xd76aa478,
+ 0xe8c7b756,
+ 0x242070db,
+ 0xc1bdceee,
+ 0xf57c0faf,
+ 0x4787c62a,
+ 0xa8304613,
+ 0xfd469501,
+ 0x698098d8,
+ 0x8b44f7af,
+ 0xffff5bb1,
+ 0x895cd7be,
+ 0x6b901122,
+ 0xfd987193,
+ 0xa679438e,
+ 0x49b40821,
+
+ // round 2
+ 0xf61e2562,
+ 0xc040b340,
+ 0x265e5a51,
+ 0xe9b6c7aa,
+ 0xd62f105d,
+ 0x2441453,
+ 0xd8a1e681,
+ 0xe7d3fbc8,
+ 0x21e1cde6,
+ 0xc33707d6,
+ 0xf4d50d87,
+ 0x455a14ed,
+ 0xa9e3e905,
+ 0xfcefa3f8,
+ 0x676f02d9,
+ 0x8d2a4c8a,
+
+ // round3
+ 0xfffa3942,
+ 0x8771f681,
+ 0x6d9d6122,
+ 0xfde5380c,
+ 0xa4beea44,
+ 0x4bdecfa9,
+ 0xf6bb4b60,
+ 0xbebfbc70,
+ 0x289b7ec6,
+ 0xeaa127fa,
+ 0xd4ef3085,
+ 0x4881d05,
+ 0xd9d4d039,
+ 0xe6db99e5,
+ 0x1fa27cf8,
+ 0xc4ac5665,
+
+ // round 4
+ 0xf4292244,
+ 0x432aff97,
+ 0xab9423a7,
+ 0xfc93a039,
+ 0x655b59c3,
+ 0x8f0ccc92,
+ 0xffeff47d,
+ 0x85845dd1,
+ 0x6fa87e4f,
+ 0xfe2ce6e0,
+ 0xa3014314,
+ 0x4e0811a1,
+ 0xf7537e82,
+ 0xbd3af235,
+ 0x2ad7d2bb,
+ 0xeb86d391,
+}
+
+var shift1 = []uint32{7, 12, 17, 22}
+
+var shift2 = []uint32{5, 9, 14, 20}
+
+var shift3 = []uint32{4, 11, 16, 23}
+
+var shift4 = []uint32{6, 10, 15, 21}
+
+func md5block(dig *MD5, p []byte, nn int) int {
+ var a uint32
+ var b uint32
+ var c uint32
+ var d uint32
+ var aa uint32
+ var bb uint32
+ var cc uint32
+ var dd uint32
+ var i int
+ var j int
+ var n int
+ var X [16]uint32
+
+ a = dig.s[0]
+ b = dig.s[1]
+ c = dig.s[2]
+ d = dig.s[3]
+ n = 0
+
+ for nn >= _Chunk {
+ aa = a
+ bb = b
+ cc = c
+ dd = d
+
+ for i = 0; i < 16; i++ {
+ j = i * 4
+ X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
+ }
+
+ // Round 1.
+ for i = 0; i < 16; i++ {
+ var x uint32
+ var t uint32
+ var s uint32
+ var f uint32
+ x = uint32(i)
+ t = uint32(i)
+ s = shift1[i%4]
+ f = ((c ^ d) & b) ^ d
+ a += f + X[x] + table[t]
+ a = a<<s | a>>(32-s)
+ a += b
+
+ t = d
+ d = c
+ c = b
+ b = a
+ a = t
+ }
+
+ // Round 2.
+ for i = 0; i < 16; i++ {
+ var x uint32
+ var t uint32
+ var s uint32
+ var g uint32
+
+ x = (1 + 5*uint32(i)) % 16
+ t = 16 + uint32(i)
+ s = shift2[i%4]
+ g = ((b ^ c) & d) ^ c
+ a += g + X[x] + table[t]
+ a = a<<s | a>>(32-s)
+ a += b
+
+ t = d
+ d = c
+ c = b
+ b = a
+ a = t
+ }
+
+ // Round 3.
+ for i = 0; i < 16; i++ {
+ var x uint32
+ var t uint32
+ var s uint32
+ var h uint32
+
+ x = (5 + 3*uint32(i)) % 16
+ t = 32 + uint32(i)
+ s = shift3[i%4]
+ h = b ^ c ^ d
+ a += h + X[x] + table[t]
+ a = a<<s | a>>(32-s)
+ a += b
+
+ t = d
+ d = c
+ c = b
+ b = a
+ a = t
+ }
+
+ // Round 4.
+ for i = 0; i < 16; i++ {
+ var x uint32
+ var s uint32
+ var t uint32
+ var ii uint32
+
+ x = (7 * uint32(i)) % 16
+ s = shift4[i%4]
+ t = 48 + uint32(i)
+ ii = c ^ (b | ^d)
+ a += ii + X[x] + table[t]
+ a = a<<s | a>>(32-s)
+ a += b
+
+ t = d
+ d = c
+ c = b
+ b = a
+ a = t
+ }
+
+ a += aa
+ b += bb
+ c += cc
+ d += dd
+
+ p = p[_Chunk:]
+ n += _Chunk
+ nn -= _Chunk
+ }
+
+ dig.s[0] = a
+ dig.s[1] = b
+ dig.s[2] = c
+ dig.s[3] = d
+ return n
+}
diff --git a/src/cmd/internal/gc/mparith1.go b/src/cmd/internal/gc/mparith1.go
new file mode 100644
index 0000000000..45f16d969c
--- /dev/null
+++ b/src/cmd/internal/gc/mparith1.go
@@ -0,0 +1,698 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "math"
+)
+
+/// uses arithmetic
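+
+// The comparison helpers below subtract into a scratch value and test
+// its sign; the mixed int/float comparisons convert the integer through
+// its decimal text (Bconv, then mpatoflt).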
+
+func mpcmpfixflt(a *Mpint, b *Mpflt) int {
+ var buf string
+ var c Mpflt
+
+ buf = fmt.Sprintf("%v", Bconv(a, 0))
+ mpatoflt(&c, buf)
+ return mpcmpfltflt(&c, b)
+}
+
+func mpcmpfltfix(a *Mpflt, b *Mpint) int {
+ var buf string
+ var c Mpflt
+
+ buf = fmt.Sprintf("%v", Bconv(b, 0))
+ mpatoflt(&c, buf)
+ return mpcmpfltflt(a, &c)
+}
+
+func Mpcmpfixfix(a *Mpint, b *Mpint) int {
+ var c Mpint
+
+ mpmovefixfix(&c, a)
+ mpsubfixfix(&c, b)
+ return mptestfix(&c)
+}
+
+func mpcmpfixc(b *Mpint, c int64) int {
+ var c1 Mpint
+
+ Mpmovecfix(&c1, c)
+ return Mpcmpfixfix(b, &c1)
+}
+
+func mpcmpfltflt(a *Mpflt, b *Mpflt) int {
+ var c Mpflt
+
+ mpmovefltflt(&c, a)
+ mpsubfltflt(&c, b)
+ return mptestflt(&c)
+}
+
+func mpcmpfltc(b *Mpflt, c float64) int {
+ var a Mpflt
+
+ Mpmovecflt(&a, c)
+ return mpcmpfltflt(b, &a)
+}
+
+func mpsubfixfix(a *Mpint, b *Mpint) {
+ mpnegfix(a)
+ mpaddfixfix(a, b, 0)
+ mpnegfix(a)
+}
+
+func mpsubfltflt(a *Mpflt, b *Mpflt) {
+ mpnegflt(a)
+ mpaddfltflt(a, b)
+ mpnegflt(a)
+}
+
+func mpaddcfix(a *Mpint, c int64) {
+ var b Mpint
+
+ Mpmovecfix(&b, c)
+ mpaddfixfix(a, &b, 0)
+}
+
+func mpaddcflt(a *Mpflt, c float64) {
+ var b Mpflt
+
+ Mpmovecflt(&b, c)
+ mpaddfltflt(a, &b)
+}
+
+func mpmulcfix(a *Mpint, c int64) {
+ var b Mpint
+
+ Mpmovecfix(&b, c)
+ mpmulfixfix(a, &b)
+}
+
+func mpmulcflt(a *Mpflt, c float64) {
+ var b Mpflt
+
+ Mpmovecflt(&b, c)
+ mpmulfltflt(a, &b)
+}
+
+func mpdivfixfix(a *Mpint, b *Mpint) {
+ var q Mpint
+ var r Mpint
+
+ mpdivmodfixfix(&q, &r, a, b)
+ mpmovefixfix(a, &q)
+}
+
+func mpmodfixfix(a *Mpint, b *Mpint) {
+ var q Mpint
+ var r Mpint
+
+ mpdivmodfixfix(&q, &r, a, b)
+ mpmovefixfix(a, &r)
+}
+
+func mpcomfix(a *Mpint) {
+ var b Mpint
+
+ Mpmovecfix(&b, 1)
+ mpnegfix(a)
+ mpsubfixfix(a, &b)
+}
+
+func Mpmovefixflt(a *Mpflt, b *Mpint) {
+ a.Val = *b
+ a.Exp = 0
+ mpnorm(a)
+}
+
+// convert (truncate) b to a.
+// return -1 (but still convert) if b was non-integer.
+func mpexactfltfix(a *Mpint, b *Mpflt) int {
+ var f Mpflt
+
+ *a = b.Val
+ Mpshiftfix(a, int(b.Exp))
+ if b.Exp < 0 {
+ f.Val = *a
+ f.Exp = 0
+ mpnorm(&f)
+ if mpcmpfltflt(b, &f) != 0 {
+ return -1
+ }
+ }
+
+ return 0
+}
+
+func mpmovefltfix(a *Mpint, b *Mpflt) int {
+ var f Mpflt
+ var i int
+
+ if mpexactfltfix(a, b) == 0 {
+ return 0
+ }
+
+ // try rounding down a little
+ f = *b
+
+ f.Val.A[0] = 0
+ if mpexactfltfix(a, &f) == 0 {
+ return 0
+ }
+
+ // try rounding up a little
+ for i = 1; i < Mpprec; i++ {
+ f.Val.A[i]++
+ if f.Val.A[i] != Mpbase {
+ break
+ }
+ f.Val.A[i] = 0
+ }
+
+ mpnorm(&f)
+ if mpexactfltfix(a, &f) == 0 {
+ return 0
+ }
+
+ return -1
+}
+
+func mpmovefixfix(a *Mpint, b *Mpint) {
+ *a = *b
+}
+
+func mpmovefltflt(a *Mpflt, b *Mpflt) {
+ *a = *b
+}
+
+var tab = []float64{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7}
+
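+// mppow10flt computes 10**p into a by repeated squaring.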
+func mppow10flt(a *Mpflt, p int) {
+ if p < 0 {
+ panic("abort")
+ }
+ if p < len(tab) {
+ Mpmovecflt(a, tab[p])
+ return
+ }
+
+ mppow10flt(a, p>>1)
+ mpmulfltflt(a, a)
+ if p&1 != 0 {
+ mpmulcflt(a, 10)
+ }
+}
+
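+// mphextofix parses the hex digits of s into a,
+// setting a.Ovf if the value does not fit.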
+func mphextofix(a *Mpint, s string) {
+ var c int8
+ var d int
+ var bit int
+ var hexdigitp int
+ var end int
+
+ for s != "" && s[0] == '0' {
+ s = s[1:]
+ }
+
+ // overflow
+ if 4*len(s) > Mpscale*Mpprec {
+ a.Ovf = 1
+ return
+ }
+
+ end = len(s) - 1
+ for hexdigitp = end; hexdigitp >= 0; hexdigitp-- {
+ c = int8(s[hexdigitp])
+ if c >= '0' && c <= '9' {
+ d = int(c) - '0'
+ } else if c >= 'A' && c <= 'F' {
+ d = int(c) - 'A' + 10
+ } else {
+ d = int(c) - 'a' + 10
+ }
+
+ bit = 4 * (end - hexdigitp)
+ for d > 0 {
+ if d&1 != 0 {
+ a.A[bit/Mpscale] |= int(1) << uint(bit%Mpscale)
+ }
+ bit++
+ d = d >> 1
+ }
+ }
+}
+
+//
+// floating point input
+// required syntax is [+-]d*[.]d*[e[+-]d*] or [+-]0xH*[p[+-]d*]
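+// e.g. 1.5e3 or 0x1p-2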
+//
+func mpatoflt(a *Mpflt, as string) {
+ var b Mpflt
+ var dp int
+ var c int
+ var f int
+ var ef int
+ var ex int
+ var eb int
+ var base int
+ var s string
+ var start string
+
+ for as[0] == ' ' || as[0] == '\t' {
+ as = as[1:]
+ }
+
+ /* determine base */
+ s = as
+
+ base = -1
+ for base == -1 {
+ if s == "" {
+ base = 10
+ break
+ }
+ c := s[0]
+ s = s[1:]
+ switch c {
+ case '-',
+ '+':
+ break
+
+ case '0':
+ if s != "" && s[0] == 'x' {
+ base = 16
+ } else {
+ base = 10
+ }
+
+ default:
+ base = 10
+ }
+ }
+
+ s = as
+ dp = 0 /* digits after decimal point */
+ f = 0 /* sign */
+ ex = 0 /* exponent */
+ eb = 0 /* binary point */
+
+ Mpmovecflt(a, 0.0)
+ if base == 16 {
+ start = ""
+ for {
+ c, _ = intstarstringplusplus(s)
+ if c == '-' {
+ f = 1
+ s = s[1:]
+ } else if c == '+' {
+ s = s[1:]
+ } else if c == '0' && s[1] == 'x' {
+ s = s[2:]
+ start = s
+ } else if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
+ s = s[1:]
+ } else {
+ break
+ }
+ }
+
+ if start == "" {
+ Yyerror("malformed hex constant: %s", as)
+ goto bad
+ }
+
+ mphextofix(&a.Val, start[:len(start)-len(s)])
+ if a.Val.Ovf != 0 {
+ Yyerror("constant too large: %s", as)
+ goto bad
+ }
+
+ a.Exp = 0
+ mpnorm(a)
+ }
+
+ for {
+ c, s = intstarstringplusplus(s)
+ switch c {
+ default:
+ Yyerror("malformed constant: %s (at %c)", as, c)
+ goto bad
+
+ case '-':
+ f = 1
+ fallthrough
+
+ case ' ',
+ '\t',
+ '+':
+ continue
+
+ case '.':
+ if base == 16 {
+ Yyerror("decimal point in hex constant: %s", as)
+ goto bad
+ }
+
+ dp = 1
+ continue
+
+ case '1',
+ '2',
+ '3',
+ '4',
+ '5',
+ '6',
+ '7',
+ '8',
+ '9',
+ '0':
+ mpmulcflt(a, 10)
+ mpaddcflt(a, float64(c)-'0')
+ if dp != 0 {
+ dp++
+ }
+ continue
+
+ case 'P',
+ 'p':
+ eb = 1
+ fallthrough
+
+ case 'E',
+ 'e':
+ ex = 0
+ ef = 0
+ for {
+ c, s = intstarstringplusplus(s)
+ if c == '+' || c == ' ' || c == '\t' {
+ continue
+ }
+ if c == '-' {
+ ef = 1
+ continue
+ }
+
+ if c >= '0' && c <= '9' {
+ ex = ex*10 + (c - '0')
+ if ex > 1e8 {
+ Yyerror("constant exponent out of range: %s", as)
+ errorexit()
+ }
+
+ continue
+ }
+
+ break
+ }
+
+ if ef != 0 {
+ ex = -ex
+ }
+ fallthrough
+
+ case 0:
+ break
+ }
+
+ break
+ }
+
+ if eb != 0 {
+ if dp != 0 {
+ Yyerror("decimal point and binary point in constant: %s", as)
+ goto bad
+ }
+
+ mpsetexp(a, int(a.Exp)+ex)
+ goto out
+ }
+
+ if dp != 0 {
+ dp--
+ }
+ if mpcmpfltc(a, 0.0) != 0 {
+ if ex >= dp {
+ mppow10flt(&b, ex-dp)
+ mpmulfltflt(a, &b)
+ } else {
+ // 4 approximates least_upper_bound(log2(10)).
+ if dp-ex >= 1<<(32-3) || int(int16(4*(dp-ex))) != 4*(dp-ex) {
+ Mpmovecflt(a, 0.0)
+ } else {
+ mppow10flt(&b, dp-ex)
+ mpdivfltflt(a, &b)
+ }
+ }
+ }
+
+out:
+ if f != 0 {
+ mpnegflt(a)
+ }
+ return
+
+bad:
+ Mpmovecflt(a, 0.0)
+}
+
+//
+// fixed point input
+// required syntax is [+-][0[x]]d*
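+// e.g. 42, -0644, 0x2A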
+//
+func mpatofix(a *Mpint, as string) {
+ var c int
+ var f int
+ var s string
+ var s0 string
+
+ s = as
+ f = 0
+ Mpmovecfix(a, 0)
+
+ c, s = intstarstringplusplus(s)
+ switch c {
+ case '-':
+ f = 1
+ fallthrough
+
+ case '+':
+ c, s = intstarstringplusplus(s)
+ if c != '0' {
+ break
+ }
+ fallthrough
+
+ case '0':
+ goto oct
+ }
+
+ for c != 0 {
+ if c >= '0' && c <= '9' {
+ mpmulcfix(a, 10)
+ mpaddcfix(a, int64(c)-'0')
+ c, s = intstarstringplusplus(s)
+ continue
+ }
+
+ Yyerror("malformed decimal constant: %s", as)
+ goto bad
+ }
+
+ goto out
+
+oct:
+ c, s = intstarstringplusplus(s)
+ if c == 'x' || c == 'X' {
+ goto hex
+ }
+ for c != 0 {
+ if c >= '0' && c <= '7' {
+ mpmulcfix(a, 8)
+ mpaddcfix(a, int64(c)-'0')
+ c, s = intstarstringplusplus(s)
+ continue
+ }
+
+ Yyerror("malformed octal constant: %s", as)
+ goto bad
+ }
+
+ goto out
+
+hex:
+ s0 = s
+ c, _ = intstarstringplusplus(s)
+ for c != 0 {
+ if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
+ s = s[1:]
+ c, _ = intstarstringplusplus(s)
+ continue
+ }
+
+ Yyerror("malformed hex constant: %s", as)
+ goto bad
+ }
+
+ mphextofix(a, s0)
+ if a.Ovf != 0 {
+ Yyerror("constant too large: %s", as)
+ goto bad
+ }
+
+out:
+ if f != 0 {
+ mpnegfix(a)
+ }
+ return
+
+bad:
+ Mpmovecfix(a, 0)
+}
+
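+// Bconv formats the multiprecision integer xval in decimal,
+// or in 0x hexadecimal if flag&obj.FmtSharp is set.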
+func Bconv(xval *Mpint, flag int) string {
+ var buf [500]byte
+ var p int
+ var fp string
+
+ var q Mpint
+ var r Mpint
+ var ten Mpint
+ var sixteen Mpint
+ var f int
+ var digit int
+
+ mpmovefixfix(&q, xval)
+ f = 0
+ if mptestfix(&q) < 0 {
+ f = 1
+ mpnegfix(&q)
+ }
+
+ p = len(buf)
+ if flag&obj.FmtSharp != 0 /*untyped*/ {
+ // Hexadecimal
+ Mpmovecfix(&sixteen, 16)
+
+ for {
+ mpdivmodfixfix(&q, &r, &q, &sixteen)
+ digit = int(Mpgetfix(&r))
+ if digit < 10 {
+ p--
+ buf[p] = byte(digit + '0')
+ } else {
+ p--
+ buf[p] = byte(digit - 10 + 'A')
+ }
+ if mptestfix(&q) <= 0 {
+ break
+ }
+ }
+
+ p--
+ buf[p] = 'x'
+ p--
+ buf[p] = '0'
+ } else {
+ // Decimal
+ Mpmovecfix(&ten, 10)
+
+ for {
+ mpdivmodfixfix(&q, &r, &q, &ten)
+ p--
+ buf[p] = byte(Mpgetfix(&r) + '0')
+ if mptestfix(&q) <= 0 {
+ break
+ }
+ }
+ }
+
+ if f != 0 {
+ p--
+ buf[p] = '-'
+ }
+ fp += string(buf[p:])
+ return fp
+}
+
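+// Fconv formats the multiprecision float fvp as a binary
+// mantissa and exponent; with flag&obj.FmtSharp it uses
+// a decimal form meant for error messages.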
+func Fconv(fvp *Mpflt, flag int) string {
+ var buf string
+ var fp string
+
+ var fv Mpflt
+ var d float64
+ var dexp float64
+ var exp int
+
+ if flag&obj.FmtSharp != 0 /*untyped*/ {
+ // alternate form - decimal for error messages.
+		// for values well in range, convert to float64 and use fmt's %g
+ exp = int(fvp.Exp) + sigfig(fvp)*Mpscale
+
+ if -900 < exp && exp < 900 {
+ d = mpgetflt(fvp)
+ if d >= 0 && (flag&obj.FmtSign != 0 /*untyped*/) {
+ fp += fmt.Sprintf("+")
+ }
+ fp += fmt.Sprintf("%g", d)
+ return fp
+ }
+
+ // very out of range. compute decimal approximation by hand.
+ // decimal exponent
+ dexp = float64(fvp.Exp) * 0.301029995663981195 // log_10(2)
+ exp = int(dexp)
+
+ // decimal mantissa
+ fv = *fvp
+
+ fv.Val.Neg = 0
+ fv.Exp = 0
+ d = mpgetflt(&fv)
+ d *= math.Pow(10, dexp-float64(exp))
+ for d >= 9.99995 {
+ d /= 10
+ exp++
+ }
+
+ if fvp.Val.Neg != 0 {
+ fp += fmt.Sprintf("-")
+ } else if flag&obj.FmtSign != 0 /*untyped*/ {
+ fp += fmt.Sprintf("+")
+ }
+ fp += fmt.Sprintf("%.5fe+%d", d, exp)
+ return fp
+ }
+
+ if sigfig(fvp) == 0 {
+ buf = fmt.Sprintf("0p+0")
+ goto out
+ }
+
+ fv = *fvp
+
+ for fv.Val.A[0] == 0 {
+ Mpshiftfix(&fv.Val, -Mpscale)
+ fv.Exp += Mpscale
+ }
+
+ for fv.Val.A[0]&1 == 0 {
+ Mpshiftfix(&fv.Val, -1)
+ fv.Exp += 1
+ }
+
+ if fv.Exp >= 0 {
+ buf = fmt.Sprintf("%vp+%d", Bconv(&fv.Val, obj.FmtSharp), fv.Exp)
+ goto out
+ }
+
+ buf = fmt.Sprintf("%vp-%d", Bconv(&fv.Val, obj.FmtSharp), -fv.Exp)
+
+out:
+ fp += buf
+ return fp
+}
diff --git a/src/cmd/internal/gc/mparith2.go b/src/cmd/internal/gc/mparith2.go
new file mode 100644
index 0000000000..415adf079c
--- /dev/null
+++ b/src/cmd/internal/gc/mparith2.go
@@ -0,0 +1,728 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+//
+// return the number of significant
+// words in the argument
+//
+func mplen(a *Mpint) int {
+ var i int
+ var n int
+
+ n = -1
+ for i = 0; i < Mpprec; i++ {
+ if a.A[i] != 0 {
+ n = i
+ }
+ }
+
+ return n + 1
+}
+
+//
+// left shift mpint by one
+// ignores sign
+//
+func mplsh(a *Mpint, quiet int) {
+ var x int
+ var i int
+ var c int
+
+ c = 0
+ for i = 0; i < Mpprec; i++ {
+ x = (a.A[i] << 1) + c
+ c = 0
+ if x >= Mpbase {
+ x -= Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+
+ a.Ovf = uint8(c)
+ if a.Ovf != 0 && !(quiet != 0) {
+ Yyerror("constant shift overflow")
+ }
+}
+
+//
+// left shift mpint by Mpscale
+// ignores sign
+//
+func mplshw(a *Mpint, quiet int) {
+ var i int
+
+ i = Mpprec - 1
+ if a.A[i] != 0 {
+ a.Ovf = 1
+ if !(quiet != 0) {
+ Yyerror("constant shift overflow")
+ }
+ }
+
+ for ; i > 0; i-- {
+ a.A[i] = a.A[i-1]
+ }
+ a.A[i] = 0
+}
+
+//
+// right shift mpint by one
+// ignores sign and overflow
+//
+func mprsh(a *Mpint) {
+ var x int
+ var lo int
+ var i int
+ var c int
+
+ c = 0
+ lo = a.A[0] & 1
+ for i = Mpprec - 1; i >= 0; i-- {
+ x = a.A[i]
+ a.A[i] = (x + c) >> 1
+ c = 0
+ if x&1 != 0 {
+ c = Mpbase
+ }
+ }
+
+ if a.Neg != 0 && lo != 0 {
+ mpaddcfix(a, -1)
+ }
+}
+
+//
+// right shift mpint by Mpscale
+// ignores sign and overflow
+//
+func mprshw(a *Mpint) {
+ var lo int
+ var i int
+
+ lo = a.A[0]
+ for i = 0; i < Mpprec-1; i++ {
+ a.A[i] = a.A[i+1]
+ }
+
+ a.A[i] = 0
+ if a.Neg != 0 && lo != 0 {
+ mpaddcfix(a, -1)
+ }
+}
+
+//
+// return the sign of (abs(a)-abs(b))
+//
+func mpcmp(a *Mpint, b *Mpint) int {
+ var x int
+ var i int
+
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in cmp")
+ }
+ return 0
+ }
+
+ for i = Mpprec - 1; i >= 0; i-- {
+ x = a.A[i] - b.A[i]
+ if x > 0 {
+ return +1
+ }
+ if x < 0 {
+ return -1
+ }
+ }
+
+ return 0
+}
+
+//
+// negate a
+// ignore sign and ovf
+//
+func mpneg(a *Mpint) {
+ var x int
+ var i int
+ var c int
+
+ c = 0
+ for i = 0; i < Mpprec; i++ {
+ x = -a.A[i] - c
+ c = 0
+ if x < 0 {
+ x += Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+}
+
+// shift left by s (or right by -s)
+func Mpshiftfix(a *Mpint, s int) {
+ if s >= 0 {
+ for s >= Mpscale {
+ mplshw(a, 0)
+ s -= Mpscale
+ }
+
+ for s > 0 {
+ mplsh(a, 0)
+ s--
+ }
+ } else {
+ s = -s
+ for s >= Mpscale {
+ mprshw(a)
+ s -= Mpscale
+ }
+
+ for s > 0 {
+ mprsh(a)
+ s--
+ }
+ }
+}
+
+/// implements fix arithmetic
+
+func mpaddfixfix(a *Mpint, b *Mpint, quiet int) {
+ var i int
+ var c int
+ var x int
+
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mpaddxx")
+ }
+ a.Ovf = 1
+ return
+ }
+
+ c = 0
+ if a.Neg != b.Neg {
+ goto sub
+ }
+
+ // perform a+b
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] + b.A[i] + c
+ c = 0
+ if x >= Mpbase {
+ x -= Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+
+ a.Ovf = uint8(c)
+ if a.Ovf != 0 && !(quiet != 0) {
+ Yyerror("constant addition overflow")
+ }
+
+ return
+
+ // perform a-b
+sub:
+ switch mpcmp(a, b) {
+ case 0:
+ Mpmovecfix(a, 0)
+
+ case 1:
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] - b.A[i] - c
+ c = 0
+ if x < 0 {
+ x += Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+
+ case -1:
+ a.Neg ^= 1
+ for i = 0; i < Mpprec; i++ {
+ x = b.A[i] - a.A[i] - c
+ c = 0
+ if x < 0 {
+ x += Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+ }
+}
+
+func mpmulfixfix(a *Mpint, b *Mpint) {
+ var i int
+ var j int
+ var na int
+ var nb int
+ var x int
+ var s Mpint
+ var q Mpint
+ var c *Mpint
+
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mpmulfixfix")
+ }
+ a.Ovf = 1
+ return
+ }
+
+ // pick the smaller
+ // to test for bits
+ na = mplen(a)
+
+ nb = mplen(b)
+ if na > nb {
+ mpmovefixfix(&s, a)
+ c = b
+ na = nb
+ } else {
+ mpmovefixfix(&s, b)
+ c = a
+ }
+
+ s.Neg = 0
+
+ Mpmovecfix(&q, 0)
+ for i = 0; i < na; i++ {
+ x = c.A[i]
+ for j = 0; j < Mpscale; j++ {
+ if x&1 != 0 {
+ if s.Ovf != 0 {
+ q.Ovf = 1
+ goto out
+ }
+
+ mpaddfixfix(&q, &s, 1)
+ if q.Ovf != 0 {
+ goto out
+ }
+ }
+
+ mplsh(&s, 1)
+ x >>= 1
+ }
+ }
+
+out:
+ q.Neg = a.Neg ^ b.Neg
+ mpmovefixfix(a, &q)
+ if a.Ovf != 0 {
+ Yyerror("constant multiplication overflow")
+ }
+}
+
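+// mpmulfract multiplies the fixed-point fractions a and b,
+// scanning a's words from most significant to least.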
+func mpmulfract(a *Mpint, b *Mpint) {
+ var i int
+ var j int
+ var x int
+ var s Mpint
+ var q Mpint
+
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mpmulflt")
+ }
+ a.Ovf = 1
+ return
+ }
+
+ mpmovefixfix(&s, b)
+ s.Neg = 0
+ Mpmovecfix(&q, 0)
+
+ i = Mpprec - 1
+ x = a.A[i]
+ if x != 0 {
+ Yyerror("mpmulfract not normal")
+ }
+
+ for i--; i >= 0; i-- {
+ x = a.A[i]
+ if x == 0 {
+ mprshw(&s)
+ continue
+ }
+
+ for j = 0; j < Mpscale; j++ {
+ x <<= 1
+ if x&Mpbase != 0 {
+ mpaddfixfix(&q, &s, 1)
+ }
+ mprsh(&s)
+ }
+ }
+
+ q.Neg = a.Neg ^ b.Neg
+ mpmovefixfix(a, &q)
+ if a.Ovf != 0 {
+ Yyerror("constant multiplication overflow")
+ }
+}
+
+func mporfixfix(a *Mpint, b *Mpint) {
+ var i int
+ var x int
+
+ x = 0
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mporfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ if a.Neg != 0 {
+ a.Neg = 0
+ mpneg(a)
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] | b.A[i]
+ a.A[i] = x
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+ if x&Mpsign != 0 {
+ a.Neg = 1
+ mpneg(a)
+ }
+}
+
+func mpandfixfix(a *Mpint, b *Mpint) {
+ var i int
+ var x int
+
+ x = 0
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mpandfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ if a.Neg != 0 {
+ a.Neg = 0
+ mpneg(a)
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] & b.A[i]
+ a.A[i] = x
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+ if x&Mpsign != 0 {
+ a.Neg = 1
+ mpneg(a)
+ }
+}
+
+func mpandnotfixfix(a *Mpint, b *Mpint) {
+ var i int
+ var x int
+
+ x = 0
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mpandnotfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ if a.Neg != 0 {
+ a.Neg = 0
+ mpneg(a)
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] &^ b.A[i]
+ a.A[i] = x
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+ if x&Mpsign != 0 {
+ a.Neg = 1
+ mpneg(a)
+ }
+}
+
+func mpxorfixfix(a *Mpint, b *Mpint) {
+ var i int
+ var x int
+
+ x = 0
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mporfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ if a.Neg != 0 {
+ a.Neg = 0
+ mpneg(a)
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] ^ b.A[i]
+ a.A[i] = x
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+ if x&Mpsign != 0 {
+ a.Neg = 1
+ mpneg(a)
+ }
+}
+
+func mplshfixfix(a *Mpint, b *Mpint) {
+ var s int64
+
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mporfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ s = Mpgetfix(b)
+ if s < 0 || s >= Mpprec*Mpscale {
+ Yyerror("stupid shift: %d", s)
+ Mpmovecfix(a, 0)
+ return
+ }
+
+ Mpshiftfix(a, int(s))
+}
+
+func mprshfixfix(a *Mpint, b *Mpint) {
+ var s int64
+
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mprshfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ s = Mpgetfix(b)
+ if s < 0 || s >= Mpprec*Mpscale {
+ Yyerror("stupid shift: %d", s)
+ if a.Neg != 0 {
+ Mpmovecfix(a, -1)
+ } else {
+ Mpmovecfix(a, 0)
+ }
+ return
+ }
+
+ Mpshiftfix(a, int(-s))
+}
+
+func mpnegfix(a *Mpint) {
+ a.Neg ^= 1
+}
+
+func Mpgetfix(a *Mpint) int64 {
+ var v int64
+
+ if a.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("constant overflow")
+ }
+ return 0
+ }
+
+ v = int64(uint64(a.A[0]))
+ v |= int64(uint64(a.A[1]) << Mpscale)
+ v |= int64(uint64(a.A[2]) << (Mpscale + Mpscale))
+ if a.Neg != 0 {
+ v = int64(-uint64(v))
+ }
+ return v
+}
+
+func Mpmovecfix(a *Mpint, c int64) {
+ var i int
+ var x int64
+
+ a.Neg = 0
+ a.Ovf = 0
+
+ x = c
+ if x < 0 {
+ a.Neg = 1
+ x = int64(-uint64(x))
+ }
+
+ for i = 0; i < Mpprec; i++ {
+ a.A[i] = int(x & Mpmask)
+ x >>= Mpscale
+ }
+}
+
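+// mpdivmodfixfix computes the quotient q and remainder r
+// of n divided by d, by shift-and-subtract long division.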
+func mpdivmodfixfix(q *Mpint, r *Mpint, n *Mpint, d *Mpint) {
+ var i int
+ var ns int
+ var ds int
+
+ ns = int(n.Neg)
+ ds = int(d.Neg)
+ n.Neg = 0
+ d.Neg = 0
+
+ mpmovefixfix(r, n)
+ Mpmovecfix(q, 0)
+
+ // shift denominator until it
+ // is larger than numerator
+ for i = 0; i < Mpprec*Mpscale; i++ {
+ if mpcmp(d, r) > 0 {
+ break
+ }
+ mplsh(d, 1)
+ }
+
+	// if that never happens,
+	// the denominator is probably zero
+ if i >= Mpprec*Mpscale {
+ q.Ovf = 1
+ r.Ovf = 1
+ n.Neg = uint8(ns)
+ d.Neg = uint8(ds)
+ Yyerror("constant division overflow")
+ return
+ }
+
+	// shift the denominator back, creating
+	// the quotient one bit at a time;
+	// when done, the remaining numerator
+	// is the remainder
+ for ; i > 0; i-- {
+ mplsh(q, 1)
+ mprsh(d)
+ if mpcmp(d, r) <= 0 {
+ mpaddcfix(q, 1)
+ mpsubfixfix(r, d)
+ }
+ }
+
+ n.Neg = uint8(ns)
+ d.Neg = uint8(ds)
+ r.Neg = uint8(ns)
+ q.Neg = uint8(ns ^ ds)
+}
+
+func mpiszero(a *Mpint) int {
+ var i int
+
+ for i = Mpprec - 1; i >= 0; i-- {
+ if a.A[i] != 0 {
+ return 0
+ }
+ }
+ return 1
+}
+
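+// mpdivfract divides a by b as fixed-point fractions,
+// developing one quotient bit per step.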
+func mpdivfract(a *Mpint, b *Mpint) {
+ var n Mpint
+ var d Mpint
+ var i int
+ var j int
+ var neg int
+ var x int
+
+ mpmovefixfix(&n, a) // numerator
+ mpmovefixfix(&d, b) // denominator
+
+ neg = int(n.Neg) ^ int(d.Neg)
+
+ n.Neg = 0
+ d.Neg = 0
+ for i = Mpprec - 1; i >= 0; i-- {
+ x = 0
+ for j = 0; j < Mpscale; j++ {
+ x <<= 1
+ if mpcmp(&d, &n) <= 0 {
+ if !(mpiszero(&d) != 0) {
+ x |= 1
+ }
+ mpsubfixfix(&n, &d)
+ }
+
+ mprsh(&d)
+ }
+
+ a.A[i] = x
+ }
+
+ a.Neg = uint8(neg)
+}
+
+func mptestfix(a *Mpint) int {
+ var b Mpint
+ var r int
+
+ Mpmovecfix(&b, 0)
+ r = mpcmp(a, &b)
+ if a.Neg != 0 {
+ if r > 0 {
+ return -1
+ }
+ if r < 0 {
+ return +1
+ }
+ }
+
+ return r
+}
diff --git a/src/cmd/internal/gc/mparith3.go b/src/cmd/internal/gc/mparith3.go
new file mode 100644
index 0000000000..61bf9e9aad
--- /dev/null
+++ b/src/cmd/internal/gc/mparith3.go
@@ -0,0 +1,377 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "math"
+)
+
+/*
+ * returns the index (plus one) of the
+ * leading non-zero word of the number
+ */
+func sigfig(a *Mpflt) int {
+ var i int
+
+ for i = Mpprec - 1; i >= 0; i-- {
+ if a.Val.A[i] != 0 {
+ break
+ }
+ }
+
+ //print("sigfig %d %d\n", i-z+1, z);
+ return i + 1
+}
+
+/*
+ * sets the exponent.
+ * an exponent that is too large is an error;
+ * one that is too small rounds the number to zero.
+ */
+func mpsetexp(a *Mpflt, exp int) {
+ if int(int16(exp)) != exp {
+ if exp > 0 {
+ Yyerror("float constant is too large")
+ a.Exp = 0x7fff
+ } else {
+ Mpmovecflt(a, 0)
+ }
+ } else {
+ a.Exp = int16(exp)
+ }
+}
+
+/*
+ * shifts the leading non-zero
+ * word of the number to Mpnorm
+ */
+func mpnorm(a *Mpflt) {
+ var s int
+ var os int
+ var x int
+
+ os = sigfig(a)
+ if os == 0 {
+ // zero
+ a.Exp = 0
+
+ a.Val.Neg = 0
+ return
+ }
+
+ // this will normalize to the nearest word
+ x = a.Val.A[os-1]
+
+ s = (Mpnorm - os) * Mpscale
+
+ // further normalize to the nearest bit
+ for {
+ x <<= 1
+ if x&Mpbase != 0 {
+ break
+ }
+ s++
+ if x == 0 {
+ // this error comes from trying to
+ // convert an Inf or something
+ // where the initial x=0x80000000
+ s = (Mpnorm - os) * Mpscale
+
+ break
+ }
+ }
+
+ Mpshiftfix(&a.Val, s)
+ mpsetexp(a, int(a.Exp)-s)
+}
+
+/// implements float arithmetic
+
+func mpaddfltflt(a *Mpflt, b *Mpflt) {
+ var sa int
+ var sb int
+ var s int
+ var c Mpflt
+
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf("\n%v + %v", Fconv(a, 0), Fconv(b, 0))
+ }
+
+ sa = sigfig(a)
+ if sa == 0 {
+ mpmovefltflt(a, b)
+ goto out
+ }
+
+ sb = sigfig(b)
+ if sb == 0 {
+ goto out
+ }
+
+ s = int(a.Exp) - int(b.Exp)
+ if s > 0 {
+ // a is larger, shift b right
+ mpmovefltflt(&c, b)
+
+ Mpshiftfix(&c.Val, -s)
+ mpaddfixfix(&a.Val, &c.Val, 0)
+ goto out
+ }
+
+ if s < 0 {
+ // b is larger, shift a right
+ Mpshiftfix(&a.Val, s)
+
+ mpsetexp(a, int(a.Exp)-s)
+ mpaddfixfix(&a.Val, &b.Val, 0)
+ goto out
+ }
+
+ mpaddfixfix(&a.Val, &b.Val, 0)
+
+out:
+ mpnorm(a)
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf(" = %v\n\n", Fconv(a, 0))
+ }
+}
+
+func mpmulfltflt(a *Mpflt, b *Mpflt) {
+ var sa int
+ var sb int
+
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf("%v\n * %v\n", Fconv(a, 0), Fconv(b, 0))
+ }
+
+ sa = sigfig(a)
+ if sa == 0 {
+ // zero
+ a.Exp = 0
+
+ a.Val.Neg = 0
+ return
+ }
+
+ sb = sigfig(b)
+ if sb == 0 {
+ // zero
+ mpmovefltflt(a, b)
+
+ return
+ }
+
+ mpmulfract(&a.Val, &b.Val)
+ mpsetexp(a, (int(a.Exp)+int(b.Exp))+Mpscale*Mpprec-Mpscale-1)
+
+ mpnorm(a)
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf(" = %v\n\n", Fconv(a, 0))
+ }
+}
+
+func mpdivfltflt(a *Mpflt, b *Mpflt) {
+ var sa int
+ var sb int
+ var c Mpflt
+
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf("%v\n / %v\n", Fconv(a, 0), Fconv(b, 0))
+ }
+
+ sb = sigfig(b)
+ if sb == 0 {
+ // zero and ovfl
+ a.Exp = 0
+
+ a.Val.Neg = 0
+ a.Val.Ovf = 1
+ Yyerror("constant division by zero")
+ return
+ }
+
+ sa = sigfig(a)
+ if sa == 0 {
+ // zero
+ a.Exp = 0
+
+ a.Val.Neg = 0
+ return
+ }
+
+ // adjust b to top
+ mpmovefltflt(&c, b)
+
+ Mpshiftfix(&c.Val, Mpscale)
+
+ // divide
+ mpdivfract(&a.Val, &c.Val)
+
+ mpsetexp(a, (int(a.Exp)-int(c.Exp))-Mpscale*(Mpprec-1)+1)
+
+ mpnorm(a)
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf(" = %v\n\n", Fconv(a, 0))
+ }
+}
+
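+// mpgetfltN returns the float64 nearest a, for a mantissa of
+// prec bits and minimum exponent bias, rounding to even.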
+func mpgetfltN(a *Mpflt, prec int, bias int) float64 {
+ var s int
+ var i int
+ var e int
+ var minexp int
+ var v uint64
+ var f float64
+
+ if a.Val.Ovf != 0 && nsavederrors+nerrors == 0 {
+ Yyerror("mpgetflt ovf")
+ }
+
+ s = sigfig(a)
+ if s == 0 {
+ return 0
+ }
+
+ if s != Mpnorm {
+ Yyerror("mpgetflt norm")
+ mpnorm(a)
+ }
+
+ for a.Val.A[Mpnorm-1]&Mpsign == 0 {
+ Mpshiftfix(&a.Val, 1)
+ mpsetexp(a, int(a.Exp)-1) // can set 'a' to zero
+ s = sigfig(a)
+ if s == 0 {
+ return 0
+ }
+ }
+
+	// pick up the mantissa, a rounding bit, and a tie-breaking bit in a uint64
+ s = prec + 2
+
+ v = 0
+ for i = Mpnorm - 1; s >= Mpscale; i-- {
+ v = v<<Mpscale | uint64(a.Val.A[i])
+ s -= Mpscale
+ }
+
+ if s > 0 {
+ v = v<<uint(s) | uint64(a.Val.A[i])>>uint(Mpscale-s)
+ if a.Val.A[i]&((1<<uint(Mpscale-s))-1) != 0 {
+ v |= 1
+ }
+ i--
+ }
+
+ for ; i >= 0; i-- {
+ if a.Val.A[i] != 0 {
+ v |= 1
+ }
+ }
+
+ // gradual underflow
+ e = Mpnorm*Mpscale + int(a.Exp) - prec
+
+ minexp = bias + 1 - prec + 1
+ if e < minexp {
+ s = minexp - e
+ if s > prec+1 {
+ s = prec + 1
+ }
+ if v&((1<<uint(s))-1) != 0 {
+ v |= 1 << uint(s)
+ }
+ v >>= uint(s)
+ e = minexp
+ }
+
+ // round to even
+ v |= (v & 4) >> 2
+
+ v += v & 1
+ v >>= 2
+
+ f = float64(v)
+ f = math.Ldexp(f, e)
+
+ if a.Val.Neg != 0 {
+ f = -f
+ }
+
+ return f
+}
+
+func mpgetflt(a *Mpflt) float64 {
+ return mpgetfltN(a, 53, -1023)
+}
+
+func mpgetflt32(a *Mpflt) float64 {
+ return mpgetfltN(a, 24, -127)
+}
+
+func Mpmovecflt(a *Mpflt, c float64) {
+ var i int
+ var f float64
+ var l int
+
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf("\nconst %g", c)
+ }
+ Mpmovecfix(&a.Val, 0)
+ a.Exp = 0
+ if c == 0 {
+ goto out
+ }
+ if c < 0 {
+ a.Val.Neg = 1
+ c = -c
+ }
+
+ f, i = math.Frexp(c)
+ a.Exp = int16(i)
+
+ for i = 0; i < 10; i++ {
+ f = f * Mpbase
+ l = int(math.Floor(f))
+ f = f - float64(l)
+ a.Exp -= Mpscale
+ a.Val.A[0] = l
+ if f == 0 {
+ break
+ }
+ Mpshiftfix(&a.Val, Mpscale)
+ }
+
+out:
+ mpnorm(a)
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf(" = %v\n", Fconv(a, 0))
+ }
+}
+
+func mpnegflt(a *Mpflt) {
+ a.Val.Neg ^= 1
+}
+
+func mptestflt(a *Mpflt) int {
+ var s int
+
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf("\n%v?", Fconv(a, 0))
+ }
+ s = sigfig(a)
+ if s != 0 {
+ s = +1
+ if a.Val.Neg != 0 {
+ s = -1
+ }
+ }
+
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf(" = %d\n", s)
+ }
+ return s
+}
diff --git a/src/cmd/internal/gc/obj.go b/src/cmd/internal/gc/obj.go
new file mode 100644
index 0000000000..afaf87c4f5
--- /dev/null
+++ b/src/cmd/internal/gc/obj.go
@@ -0,0 +1,481 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+/*
+ * architecture-independent object file output
+ */
+const (
+ ArhdrSize = 60
+)
+
+func formathdr(arhdr []byte, name string, size int64) {
+ copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
+}
+
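+// dumpobj writes the output object file, optionally wrapped
+// in an archive with a __.PKGDEF export-data entry.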
+func dumpobj() {
+ var externs *NodeList
+ var tmp *NodeList
+ var arhdr [ArhdrSize]byte
+ var startobj int64
+ var size int64
+ var zero *Sym
+
+ var err error
+ bout, err = obj.Bopenw(outfile)
+ if err != nil {
+ Flusherrors()
+ fmt.Printf("can't create %s: %v\n", outfile, err)
+ errorexit()
+ }
+
+ startobj = 0
+ if writearchive != 0 {
+ obj.Bwritestring(bout, "!<arch>\n")
+ arhdr = [ArhdrSize]byte{}
+ obj.Bwrite(bout, arhdr[:])
+ startobj = obj.Boffset(bout)
+ }
+
+ fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+ dumpexport()
+
+ if writearchive != 0 {
+ obj.Bflush(bout)
+ size = obj.Boffset(bout) - startobj
+ if size&1 != 0 {
+ obj.Bputc(bout, 0)
+ }
+ obj.Bseek(bout, startobj-ArhdrSize, 0)
+ formathdr(arhdr[:], "__.PKGDEF", size)
+ obj.Bwrite(bout, arhdr[:])
+ obj.Bflush(bout)
+
+ obj.Bseek(bout, startobj+size+(size&1), 0)
+ arhdr = [ArhdrSize]byte{}
+ obj.Bwrite(bout, arhdr[:])
+ startobj = obj.Boffset(bout)
+ fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+ }
+
+ if pragcgobuf != "" {
+ if writearchive != 0 {
+ // write empty export section; must be before cgo section
+ fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
+ }
+
+ fmt.Fprintf(bout, "\n$$ // cgo\n")
+ fmt.Fprintf(bout, "%s\n$$\n\n", pragcgobuf)
+ }
+
+ fmt.Fprintf(bout, "\n!\n")
+
+ externs = nil
+ if externdcl != nil {
+ externs = externdcl.End
+ }
+
+ dumpglobls()
+ dumptypestructs()
+
+ // Dump extra globals.
+ tmp = externdcl
+
+ if externs != nil {
+ externdcl = externs.Next
+ }
+ dumpglobls()
+ externdcl = tmp
+
+ zero = Pkglookup("zerovalue", Runtimepkg)
+ ggloblsym(zero, int32(zerosize), obj.DUPOK|obj.RODATA)
+
+ dumpdata()
+ obj.Writeobjdirect(Ctxt, bout)
+
+ if writearchive != 0 {
+ obj.Bflush(bout)
+ size = obj.Boffset(bout) - startobj
+ if size&1 != 0 {
+ obj.Bputc(bout, 0)
+ }
+ obj.Bseek(bout, startobj-ArhdrSize, 0)
+ namebuf = fmt.Sprintf("_go_.%c", Thearch.Thechar)
+ formathdr(arhdr[:], namebuf, size)
+ obj.Bwrite(bout, arhdr[:])
+ }
+
+ obj.Bterm(bout)
+}
+
+func dumpglobls() {
+ var n *Node
+ var l *NodeList
+
+ // add globals
+ for l = externdcl; l != nil; l = l.Next {
+ n = l.N
+ if n.Op != ONAME {
+ continue
+ }
+
+ if n.Type == nil {
+ Fatal("external %v nil type\n", Nconv(n, 0))
+ }
+ if n.Class == PFUNC {
+ continue
+ }
+ if n.Sym.Pkg != localpkg {
+ continue
+ }
+ dowidth(n.Type)
+
+ ggloblnod(n)
+ }
+
+ for l = funcsyms; l != nil; l = l.Next {
+ n = l.N
+ dsymptr(n.Sym, 0, n.Sym.Def.Shortname.Sym, 0)
+ ggloblsym(n.Sym, int32(Widthptr), obj.DUPOK|obj.RODATA)
+ }
+
+ // Do not reprocess funcsyms on next dumpglobls call.
+ funcsyms = nil
+}
+
+func Bputname(b *obj.Biobuf, s *obj.LSym) {
+ obj.Bwritestring(b, s.Name)
+ obj.Bputc(b, 0)
+}
+
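+// Linksym returns the obj.LSym for s, creating and caching
+// it on first use.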
+func Linksym(s *Sym) *obj.LSym {
+ var p string
+
+ if s == nil {
+ return nil
+ }
+ if s.Lsym != nil {
+ return s.Lsym
+ }
+ if isblanksym(s) {
+ s.Lsym = obj.Linklookup(Ctxt, "_", 0)
+ } else if s.Linkname != "" {
+ s.Lsym = obj.Linklookup(Ctxt, s.Linkname, 0)
+ } else {
+ p = fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name)
+ s.Lsym = obj.Linklookup(Ctxt, p, 0)
+ }
+
+ return s.Lsym
+}
+
+func duintxx(s *Sym, off int, v uint64, wid int) int {
+ // Update symbol data directly instead of generating a
+ // DATA instruction that liblink will have to interpret later.
+ // This reduces compilation time and memory usage.
+ off = int(Rnd(int64(off), int64(wid)))
+
+ return int(obj.Setuintxx(Ctxt, Linksym(s), int64(off), v, int64(wid)))
+}
+
+func duint8(s *Sym, off int, v uint8) int {
+ return duintxx(s, off, uint64(v), 1)
+}
+
+func duint16(s *Sym, off int, v uint16) int {
+ return duintxx(s, off, uint64(v), 2)
+}
+
+func duint32(s *Sym, off int, v uint32) int {
+ return duintxx(s, off, uint64(v), 4)
+}
+
+func duint64(s *Sym, off int, v uint64) int {
+ return duintxx(s, off, v, 8)
+}
+
+func duintptr(s *Sym, off int, v uint64) int {
+ return duintxx(s, off, v, Widthptr)
+}
+
+var stringsym_gen int
+
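+// stringsym returns a read-only data symbol holding the
+// string header and contents of s.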
+func stringsym(s string) *Sym {
+ var sym *Sym
+ var off int
+ var n int
+ var m int
+ var tmp struct {
+ lit Strlit
+ buf string
+ }
+ var pkg *Pkg
+
+ if len(s) > 100 {
+ // huge strings are made static to avoid long names
+ stringsym_gen++
+ namebuf = fmt.Sprintf(".gostring.%d", stringsym_gen)
+
+ pkg = localpkg
+ } else {
+ // small strings get named by their contents,
+ // so that multiple modules using the same string
+ // can share it.
+ tmp.lit.S = s
+ namebuf = fmt.Sprintf("\"%v\"", Zconv(&tmp.lit, 0))
+ pkg = gostringpkg
+ }
+
+ sym = Pkglookup(namebuf, pkg)
+
+ // SymUniq flag indicates that data is generated already
+ if sym.Flags&SymUniq != 0 {
+ return sym
+ }
+ sym.Flags |= SymUniq
+ sym.Def = newname(sym)
+
+ off = 0
+
+ // string header
+ off = dsymptr(sym, off, sym, Widthptr+Widthint)
+ off = duintxx(sym, off, uint64(len(s)), Widthint)
+
+ // string data
+ for n = 0; n < len(s); n += m {
+ m = 8
+ if m > len(s)-n {
+ m = len(s) - n
+ }
+ off = dsname(sym, off, s[n:n+m])
+ }
+
+ off = duint8(sym, off, 0) // terminating NUL for runtime
+ off = (off + Widthptr - 1) &^ (Widthptr - 1) // round to pointer alignment
+ ggloblsym(sym, int32(off), obj.DUPOK|obj.RODATA)
+
+ return sym
+}
+
+var slicebytes_gen int
+
+func slicebytes(nam *Node, s string, len int) {
+ var off int
+ var n int
+ var m int
+ var sym *Sym
+
+ slicebytes_gen++
+ namebuf = fmt.Sprintf(".gobytes.%d", slicebytes_gen)
+ sym = Pkglookup(namebuf, localpkg)
+ sym.Def = newname(sym)
+
+ off = 0
+ for n = 0; n < len; n += m {
+ m = 8
+ if m > len-n {
+ m = len - n
+ }
+ off = dsname(sym, off, s[n:n+m])
+ }
+
+ ggloblsym(sym, int32(off), obj.NOPTR)
+
+ if nam.Op != ONAME {
+ Fatal("slicebytes %v", Nconv(nam, 0))
+ }
+ off = int(nam.Xoffset)
+ off = dsymptr(nam.Sym, off, sym, 0)
+ off = duintxx(nam.Sym, off, uint64(len), Widthint)
+ duintxx(nam.Sym, off, uint64(len), Widthint)
+}
+
+func dstringptr(s *Sym, off int, str string) int {
+ var p *obj.Prog
+
+ off = int(Rnd(int64(off), int64(Widthptr)))
+ p = Thearch.Gins(obj.ADATA, nil, nil)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = Linksym(s)
+ p.From.Offset = int64(off)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = int64(Widthptr)
+
+ Datastring(str+"\x00", &p.To) // TODO(rsc): Remove NUL
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Etype = Simtype[TINT]
+ off += Widthptr
+
+ return off
+}
+
+/*
+ * gobj.c
+ */
+func Datastring(s string, a *obj.Addr) {
+ var sym *Sym
+
+ sym = stringsym(s)
+ a.Type = obj.TYPE_MEM
+ a.Name = obj.NAME_EXTERN
+ a.Sym = Linksym(sym)
+ a.Node = sym.Def
+ a.Offset = int64(Widthptr) + int64(Widthint) // skip header
+ a.Etype = Simtype[TINT]
+}
+
+func datagostring(sval *Strlit, a *obj.Addr) {
+ var sym *Sym
+
+ sym = stringsym(sval.S)
+ a.Type = obj.TYPE_MEM
+ a.Name = obj.NAME_EXTERN
+ a.Sym = Linksym(sym)
+ a.Node = sym.Def
+ a.Offset = 0 // header
+ a.Etype = TSTRING
+}
+
+func dgostringptr(s *Sym, off int, str string) int {
+ var n int
+ var lit *Strlit
+
+ if str == "" {
+ return duintptr(s, off, 0)
+ }
+
+ n = len(str)
+ lit = new(Strlit)
+ lit.S = str
+ lit.S = lit.S[:n]
+ return dgostrlitptr(s, off, lit)
+}
+
+func dgostrlitptr(s *Sym, off int, lit *Strlit) int {
+ var p *obj.Prog
+
+ if lit == nil {
+ return duintptr(s, off, 0)
+ }
+
+ off = int(Rnd(int64(off), int64(Widthptr)))
+ p = Thearch.Gins(obj.ADATA, nil, nil)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = Linksym(s)
+ p.From.Offset = int64(off)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = int64(Widthptr)
+ datagostring(lit, &p.To)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Etype = Simtype[TINT]
+ off += Widthptr
+
+ return off
+}
+
+func dsname(s *Sym, off int, t string) int {
+ var p *obj.Prog
+
+ p = Thearch.Gins(obj.ADATA, nil, nil)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Offset = int64(off)
+ p.From.Sym = Linksym(s)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = int64(len(t))
+
+ p.To.Type = obj.TYPE_SCONST
+ p.To.U.Sval = t
+ return off + len(t)
+}
+
+func dsymptr(s *Sym, off int, x *Sym, xoff int) int {
+ var p *obj.Prog
+
+ off = int(Rnd(int64(off), int64(Widthptr)))
+
+ p = Thearch.Gins(obj.ADATA, nil, nil)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = Linksym(s)
+ p.From.Offset = int64(off)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = int64(Widthptr)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = Linksym(x)
+ p.To.Offset = int64(xoff)
+ off += Widthptr
+
+ return off
+}
+
+func gdata(nam *Node, nr *Node, wid int) {
+ var p *obj.Prog
+
+ if nr.Op == OLITERAL {
+ switch nr.Val.Ctype {
+ case CTCPLX:
+ gdatacomplex(nam, nr.Val.U.Cval)
+ return
+
+ case CTSTR:
+ gdatastring(nam, nr.Val.U.Sval)
+ return
+ }
+ }
+
+ p = Thearch.Gins(obj.ADATA, nam, nr)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = int64(wid)
+}
+
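+// gdatacomplex emits the real and imaginary parts of cval
+// as two consecutive floating-point data values.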
+func gdatacomplex(nam *Node, cval *Mpcplx) {
+ var p *obj.Prog
+ var w int
+
+ w = cplxsubtype(int(nam.Type.Etype))
+ w = int(Types[w].Width)
+
+ p = Thearch.Gins(obj.ADATA, nam, nil)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = int64(w)
+ p.To.Type = obj.TYPE_FCONST
+ p.To.U.Dval = mpgetflt(&cval.Real)
+
+ p = Thearch.Gins(obj.ADATA, nam, nil)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = int64(w)
+ p.From.Offset += int64(w)
+ p.To.Type = obj.TYPE_FCONST
+ p.To.U.Dval = mpgetflt(&cval.Imag)
+}
+
+func gdatastring(nam *Node, sval *Strlit) {
+ var p *obj.Prog
+ var nod1 Node
+
+ p = Thearch.Gins(obj.ADATA, nam, nil)
+ Datastring(sval.S, &p.To)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = Types[Tptr].Width
+ p.To.Type = obj.TYPE_ADDR
+
+ //print("%P\n", p);
+
+ Nodconst(&nod1, Types[TINT], int64(len(sval.S)))
+
+ p = Thearch.Gins(obj.ADATA, nam, &nod1)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = int64(Widthint)
+ p.From.Offset += int64(Widthptr)
+}
diff --git a/src/cmd/internal/gc/opnames.go b/src/cmd/internal/gc/opnames.go
new file mode 100644
index 0000000000..fc03ec6666
--- /dev/null
+++ b/src/cmd/internal/gc/opnames.go
@@ -0,0 +1,162 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+// auto generated by go tool dist
+var opnames = []string{
+ OXXX: "XXX",
+ ONAME: "NAME",
+ ONONAME: "NONAME",
+ OTYPE: "TYPE",
+ OPACK: "PACK",
+ OLITERAL: "LITERAL",
+ OADD: "ADD",
+ OSUB: "SUB",
+ OOR: "OR",
+ OXOR: "XOR",
+ OADDSTR: "ADDSTR",
+ OADDR: "ADDR",
+ OANDAND: "ANDAND",
+ OAPPEND: "APPEND",
+ OARRAYBYTESTR: "ARRAYBYTESTR",
+ OARRAYBYTESTRTMP: "ARRAYBYTESTRTMP",
+ OARRAYRUNESTR: "ARRAYRUNESTR",
+ OSTRARRAYBYTE: "STRARRAYBYTE",
+ OSTRARRAYBYTETMP: "STRARRAYBYTETMP",
+ OSTRARRAYRUNE: "STRARRAYRUNE",
+ OAS: "AS",
+ OAS2: "AS2",
+ OAS2FUNC: "AS2FUNC",
+ OAS2RECV: "AS2RECV",
+ OAS2MAPR: "AS2MAPR",
+ OAS2DOTTYPE: "AS2DOTTYPE",
+ OASOP: "ASOP",
+ OCALL: "CALL",
+ OCALLFUNC: "CALLFUNC",
+ OCALLMETH: "CALLMETH",
+ OCALLINTER: "CALLINTER",
+ OCALLPART: "CALLPART",
+ OCAP: "CAP",
+ OCLOSE: "CLOSE",
+ OCLOSURE: "CLOSURE",
+ OCMPIFACE: "CMPIFACE",
+ OCMPSTR: "CMPSTR",
+ OCOMPLIT: "COMPLIT",
+ OMAPLIT: "MAPLIT",
+ OSTRUCTLIT: "STRUCTLIT",
+ OARRAYLIT: "ARRAYLIT",
+ OPTRLIT: "PTRLIT",
+ OCONV: "CONV",
+ OCONVIFACE: "CONVIFACE",
+ OCONVNOP: "CONVNOP",
+ OCOPY: "COPY",
+ ODCL: "DCL",
+ ODCLFUNC: "DCLFUNC",
+ ODCLFIELD: "DCLFIELD",
+ ODCLCONST: "DCLCONST",
+ ODCLTYPE: "DCLTYPE",
+ ODELETE: "DELETE",
+ ODOT: "DOT",
+ ODOTPTR: "DOTPTR",
+ ODOTMETH: "DOTMETH",
+ ODOTINTER: "DOTINTER",
+ OXDOT: "XDOT",
+ ODOTTYPE: "DOTTYPE",
+ ODOTTYPE2: "DOTTYPE2",
+ OEQ: "EQ",
+ ONE: "NE",
+ OLT: "LT",
+ OLE: "LE",
+ OGE: "GE",
+ OGT: "GT",
+ OIND: "IND",
+ OINDEX: "INDEX",
+ OINDEXMAP: "INDEXMAP",
+ OKEY: "KEY",
+ OPARAM: "PARAM",
+ OLEN: "LEN",
+ OMAKE: "MAKE",
+ OMAKECHAN: "MAKECHAN",
+ OMAKEMAP: "MAKEMAP",
+ OMAKESLICE: "MAKESLICE",
+ OMUL: "MUL",
+ ODIV: "DIV",
+ OMOD: "MOD",
+ OLSH: "LSH",
+ ORSH: "RSH",
+ OAND: "AND",
+ OANDNOT: "ANDNOT",
+ ONEW: "NEW",
+ ONOT: "NOT",
+ OCOM: "COM",
+ OPLUS: "PLUS",
+ OMINUS: "MINUS",
+ OOROR: "OROR",
+ OPANIC: "PANIC",
+ OPRINT: "PRINT",
+ OPRINTN: "PRINTN",
+ OPAREN: "PAREN",
+ OSEND: "SEND",
+ OSLICE: "SLICE",
+ OSLICEARR: "SLICEARR",
+ OSLICESTR: "SLICESTR",
+ OSLICE3: "SLICE3",
+ OSLICE3ARR: "SLICE3ARR",
+ ORECOVER: "RECOVER",
+ ORECV: "RECV",
+ ORUNESTR: "RUNESTR",
+ OSELRECV: "SELRECV",
+ OSELRECV2: "SELRECV2",
+ OIOTA: "IOTA",
+ OREAL: "REAL",
+ OIMAG: "IMAG",
+ OCOMPLEX: "COMPLEX",
+ OBLOCK: "BLOCK",
+ OBREAK: "BREAK",
+ OCASE: "CASE",
+ OXCASE: "XCASE",
+ OCONTINUE: "CONTINUE",
+ ODEFER: "DEFER",
+ OEMPTY: "EMPTY",
+ OFALL: "FALL",
+ OXFALL: "XFALL",
+ OFOR: "FOR",
+ OGOTO: "GOTO",
+ OIF: "IF",
+ OLABEL: "LABEL",
+ OPROC: "PROC",
+ ORANGE: "RANGE",
+ ORETURN: "RETURN",
+ OSELECT: "SELECT",
+ OSWITCH: "SWITCH",
+ OTYPESW: "TYPESW",
+ OTCHAN: "TCHAN",
+ OTMAP: "TMAP",
+ OTSTRUCT: "TSTRUCT",
+ OTINTER: "TINTER",
+ OTFUNC: "TFUNC",
+ OTARRAY: "TARRAY",
+ ODDD: "DDD",
+ ODDDARG: "DDDARG",
+ OINLCALL: "INLCALL",
+ OEFACE: "EFACE",
+ OITAB: "ITAB",
+ OSPTR: "SPTR",
+ OCLOSUREVAR: "CLOSUREVAR",
+ OCFUNC: "CFUNC",
+ OCHECKNIL: "CHECKNIL",
+ OVARKILL: "VARKILL",
+ OREGISTER: "REGISTER",
+ OINDREG: "INDREG",
+ OCMP: "CMP",
+ ODEC: "DEC",
+ OINC: "INC",
+ OEXTEND: "EXTEND",
+ OHMUL: "HMUL",
+ OLROT: "LROT",
+ ORROTC: "RROTC",
+ ORETJMP: "RETJMP",
+ OEND: "END",
+}
diff --git a/src/cmd/internal/gc/order.go b/src/cmd/internal/gc/order.go
new file mode 100644
index 0000000000..6f18952334
--- /dev/null
+++ b/src/cmd/internal/gc/order.go
@@ -0,0 +1,1188 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Rewrite tree to use separate statements to enforce
+// order of evaluation. Makes walk easier, because it
+// can (after this runs) reorder at will within an expression.
+//
+// Rewrite x op= y into x = x op y.
+//
+// Introduce temporaries as needed by runtime routines.
+// For example, the map runtime routines take the map key
+// by reference, so make sure all map keys are addressable
+// by copying them to temporaries as needed.
+// The same is true for channel operations.
+//
+// Arrange that map index expressions only appear in direct
+// assignments x = m[k] or m[k] = x, never in larger expressions.
+//
+// Arrange that receive expressions only appear in direct assignments
+// x = <-c or as standalone statements <-c, never in larger expressions.
+
+// TODO(rsc): The temporary introduction during multiple assignments
+// should be moved into this file, so that the temporaries can be cleaned
+// and so that conversions implicit in the OAS2FUNC and OAS2RECV
+// nodes can be made explicit and then have their temporaries cleaned.
+
+// TODO(rsc): Goto and multilevel break/continue can jump over
+// inserted VARKILL annotations. Work out a way to handle these.
+// The current implementation is safe, in that it will execute correctly.
+// But it won't reuse temporaries as aggressively as it might, and
+// it can result in unnecessary zeroing of those variables in the function
+// prologue.
+
+// Order holds state during the ordering process.
+type Order struct {
+ out *NodeList
+ temp *NodeList
+ free *NodeList
+}
+
+// Order rewrites fn->nbody to apply the ordering constraints
+// described in the comment at the top of the file.
+func order(fn *Node) {
+ var s string
+
+ if Debug['W'] > 1 {
+ s = fmt.Sprintf("\nbefore order %v", Sconv(fn.Nname.Sym, 0))
+ dumplist(s, fn.Nbody)
+ }
+
+ orderblock(&fn.Nbody)
+}
+
+// Ordertemp allocates a new temporary with the given type,
+// pushes it onto the temp stack, and returns it.
+// If clear is true, ordertemp emits code to zero the temporary.
+func ordertemp(t *Type, order *Order, clear int) *Node {
+ var var_ *Node
+ var a *Node
+ var l *NodeList
+
+ var_ = temp(t)
+ if clear != 0 {
+ a = Nod(OAS, var_, nil)
+ typecheck(&a, Etop)
+ order.out = list(order.out, a)
+ }
+
+ l = order.free
+ if l == nil {
+ l = new(NodeList)
+ }
+ order.free = l.Next
+ l.Next = order.temp
+ l.N = var_
+ order.temp = l
+ return var_
+}
+
+// Ordercopyexpr behaves like ordertemp but also emits
+// code to initialize the temporary to the value n.
+//
+// The clear argument is provided for use when the evaluation
+// of tmp = n turns into a function call that is passed a pointer
+// to the temporary as the output space. If the call blocks before
+// tmp has been written, the garbage collector will still treat the
+// temporary as live, so we must zero it before entering that call.
+// Today, this only happens for channel receive operations.
+// (The other candidate would be map access, but map access
+// returns a pointer to the result data instead of taking a pointer
+// to be filled in.)
+func ordercopyexpr(n *Node, t *Type, order *Order, clear int) *Node {
+ var a *Node
+ var var_ *Node
+
+ var_ = ordertemp(t, order, clear)
+ a = Nod(OAS, var_, n)
+ typecheck(&a, Etop)
+ order.out = list(order.out, a)
+ return var_
+}
+
+// Ordercheapexpr returns a cheap version of n.
+// The definition of cheap is that n is a variable or constant.
+// If not, ordercheapexpr allocates a new tmp, emits tmp = n,
+// and then returns tmp.
+func ordercheapexpr(n *Node, order *Order) *Node {
+ switch n.Op {
+ case ONAME,
+ OLITERAL:
+ return n
+ }
+
+ return ordercopyexpr(n, n.Type, order, 0)
+}
+
+// Ordersafeexpr returns a safe version of n.
+// The definition of safe is that n can appear multiple times
+// without violating the semantics of the original program,
+// and that assigning to the safe version has the same effect
+// as assigning to the original n.
+//
+// The intended use is to apply to x when rewriting x += y into x = x + y.
+func ordersafeexpr(n *Node, order *Order) *Node {
+ var l *Node
+ var r *Node
+ var a *Node
+
+ switch n.Op {
+ default:
+ Fatal("ordersafeexpr %v", Oconv(int(n.Op), 0))
+ fallthrough
+
+ case ONAME,
+ OLITERAL:
+ return n
+
+ case ODOT:
+ l = ordersafeexpr(n.Left, order)
+ if l == n.Left {
+ return n
+ }
+ a = Nod(OXXX, nil, nil)
+ *a = *n
+ a.Orig = a
+ a.Left = l
+ typecheck(&a, Erv)
+ return a
+
+ case ODOTPTR,
+ OIND:
+ l = ordercheapexpr(n.Left, order)
+ if l == n.Left {
+ return n
+ }
+ a = Nod(OXXX, nil, nil)
+ *a = *n
+ a.Orig = a
+ a.Left = l
+ typecheck(&a, Erv)
+ return a
+
+ case OINDEX,
+ OINDEXMAP:
+ if Isfixedarray(n.Left.Type) != 0 {
+ l = ordersafeexpr(n.Left, order)
+ } else {
+ l = ordercheapexpr(n.Left, order)
+ }
+ r = ordercheapexpr(n.Right, order)
+ if l == n.Left && r == n.Right {
+ return n
+ }
+ a = Nod(OXXX, nil, nil)
+ *a = *n
+ a.Orig = a
+ a.Left = l
+ a.Right = r
+ typecheck(&a, Erv)
+ return a
+ }
+}
+
+// Istemp reports whether n is a temporary variable.
+func istemp(n *Node) int {
+ if n.Op != ONAME {
+ return 0
+ }
+ return bool2int(strings.HasPrefix(n.Sym.Name, "autotmp_"))
+}
+
+// Isaddrokay reports whether it is okay to pass n's address to runtime routines.
+// Taking the address of a variable makes the liveness and optimization analyses
+// lose track of where the variable's lifetime ends. To avoid hurting the analyses
+// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
+// because we emit explicit VARKILL instructions marking the end of those
+// temporaries' lifetimes.
+func isaddrokay(n *Node) int {
+ return bool2int(islvalue(n) != 0 && (n.Op != ONAME || n.Class == PEXTERN || istemp(n) != 0))
+}
+
+// Orderaddrtemp ensures that *np is okay to pass by address to runtime routines.
+// If the original argument *np is not okay, orderaddrtemp creates a tmp, emits
+// tmp = *np, and then sets *np to the tmp variable.
+func orderaddrtemp(np **Node, order *Order) {
+ var n *Node
+
+ n = *np
+ if isaddrokay(n) != 0 {
+ return
+ }
+ *np = ordercopyexpr(n, n.Type, order, 0)
+}
+
+// Marktemp returns the top of the temporary variable stack.
+func marktemp(order *Order) *NodeList {
+ return order.temp
+}
+
+// Poptemp pops temporaries off the stack until reaching the mark,
+// which must have been returned by marktemp.
+func poptemp(mark *NodeList, order *Order) {
+ var l *NodeList
+
+ for {
+ l = order.temp
+ if !(l != mark) {
+ break
+ }
+ order.temp = l.Next
+ l.Next = order.free
+ order.free = l
+ }
+}
+
+// Cleantempnopop emits to *out VARKILL instructions for each temporary
+// above the mark on the temporary stack, but it does not pop them
+// from the stack.
+func cleantempnopop(mark *NodeList, order *Order, out **NodeList) {
+ var l *NodeList
+ var kill *Node
+
+ for l = order.temp; l != mark; l = l.Next {
+ kill = Nod(OVARKILL, l.N, nil)
+ typecheck(&kill, Etop)
+ *out = list(*out, kill)
+ }
+}
+
+// Cleantemp emits VARKILL instructions for each temporary above the
+// mark on the temporary stack and removes them from the stack.
+func cleantemp(top *NodeList, order *Order) {
+ cleantempnopop(top, order, &order.out)
+ poptemp(top, order)
+}
+
+// Orderstmtlist orders each of the statements in the list.
+func orderstmtlist(l *NodeList, order *Order) {
+ for ; l != nil; l = l.Next {
+ orderstmt(l.N, order)
+ }
+}
+
+// Orderblock orders the block of statements *l onto a new list,
+// and then replaces *l with that list.
+func orderblock(l **NodeList) {
+ var order Order
+ var mark *NodeList
+
+ order = Order{}
+ mark = marktemp(&order)
+ orderstmtlist(*l, &order)
+ cleantemp(mark, &order)
+ *l = order.out
+}
+
+// Orderexprinplace orders the side effects in *np and
+// leaves them as the init list of the final *np.
+func orderexprinplace(np **Node, outer *Order) {
+ var n *Node
+ var lp **NodeList
+ var order Order
+
+ n = *np
+ order = Order{}
+ orderexpr(&n, &order)
+ addinit(&n, order.out)
+
+ // insert new temporaries from order
+ // at head of outer list.
+ lp = &order.temp
+
+ for *lp != nil {
+ lp = &(*lp).Next
+ }
+ *lp = outer.temp
+ outer.temp = order.temp
+
+ *np = n
+}
+
+// Orderstmtinplace orders the side effects of the single statement *np
+// and replaces it with the resulting statement list.
+func orderstmtinplace(np **Node) {
+ var n *Node
+ var order Order
+ var mark *NodeList
+
+ n = *np
+ order = Order{}
+ mark = marktemp(&order)
+ orderstmt(n, &order)
+ cleantemp(mark, &order)
+ *np = liststmt(order.out)
+}
+
+// Orderinit moves n's init list to order->out.
+func orderinit(n *Node, order *Order) {
+ orderstmtlist(n.Ninit, order)
+ n.Ninit = nil
+}
+
+// Ismulticall reports whether the list l is f() for a multi-value function.
+// Such an f() could appear as the lone argument to a multi-arg function.
+func ismulticall(l *NodeList) int {
+ var n *Node
+
+ // one arg only
+ if l == nil || l.Next != nil {
+ return 0
+ }
+ n = l.N
+
+ // must be call
+ switch n.Op {
+ default:
+ return 0
+
+ case OCALLFUNC,
+ OCALLMETH,
+ OCALLINTER:
+ break
+ }
+
+ // call must return multiple values
+ return bool2int(n.Left.Type.Outtuple > 1)
+}
+
+// Copyret emits t1, t2, ... = n, where n is a function call,
+// and then returns the list t1, t2, ....
+func copyret(n *Node, order *Order) *NodeList {
+ var t *Type
+ var tmp *Node
+ var as *Node
+ var l1 *NodeList
+ var l2 *NodeList
+ var tl Iter
+
+ if n.Type.Etype != TSTRUCT || !(n.Type.Funarg != 0) {
+ Fatal("copyret %v %d", Tconv(n.Type, 0), n.Left.Type.Outtuple)
+ }
+
+ l1 = nil
+ l2 = nil
+ for t = Structfirst(&tl, &n.Type); t != nil; t = structnext(&tl) {
+ tmp = temp(t.Type)
+ l1 = list(l1, tmp)
+ l2 = list(l2, tmp)
+ }
+
+ as = Nod(OAS2, nil, nil)
+ as.List = l1
+ as.Rlist = list1(n)
+ typecheck(&as, Etop)
+ orderstmt(as, order)
+
+ return l2
+}
+
+// Ordercallargs orders the list of call arguments *l.
+func ordercallargs(l **NodeList, order *Order) {
+ if ismulticall(*l) != 0 {
+ // return f() where f() is multiple values.
+ *l = copyret((*l).N, order)
+ } else {
+ orderexprlist(*l, order)
+ }
+}
+
+// Ordercall orders the call expression n.
+// n->op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
+func ordercall(n *Node, order *Order) {
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order) // ODDDARG temp
+ ordercallargs(&n.List, order)
+}
+
+// Ordermapassign appends n to order->out, introducing temporaries
+// to make sure that all map assignments have the form m[k] = x,
+// where x is addressable.
+// (Orderexpr has already been called on n, so we know k is addressable.)
+//
+// If n is m[k] = x where x is not addressable, the rewrite is:
+// tmp = x
+// m[k] = tmp
+//
+// If n is the multiple assignment form ..., m[k], ... = ..., the rewrite is
+// t1 = m
+// t2 = k
+// ...., t3, ... = x
+// t1[t2] = t3
+//
+// The temporaries t1, t2 are needed in case the ... being assigned
+// contain m or k. They are usually unnecessary, but when they are,
+// they are typically registerizable, so not much harm is done.
+// And this only applies to the multiple-assignment form.
+// We could do a more precise analysis if needed, like in walk.c.
+//
+// Ordermapassign also inserts these temporaries if needed for
+// calling writebarrierfat with a pointer to n->right.
+func ordermapassign(n *Node, order *Order) {
+ var m *Node
+ var a *Node
+ var l *NodeList
+ var post *NodeList
+
+ switch n.Op {
+ default:
+ Fatal("ordermapassign %v", Oconv(int(n.Op), 0))
+ fallthrough
+
+ case OAS:
+ order.out = list(order.out, n)
+
+ // We call writebarrierfat only for values > 4 pointers long. See walk.c.
+ if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) != 0 && n.Left.Type.Width > int64(4*Widthptr))) && !(isaddrokay(n.Right) != 0) {
+ m = n.Left
+ n.Left = ordertemp(m.Type, order, 0)
+ a = Nod(OAS, m, n.Left)
+ typecheck(&a, Etop)
+ order.out = list(order.out, a)
+ }
+
+ case OAS2,
+ OAS2DOTTYPE,
+ OAS2MAPR,
+ OAS2FUNC:
+ post = nil
+ for l = n.List; l != nil; l = l.Next {
+ if l.N.Op == OINDEXMAP {
+ m = l.N
+ if !(istemp(m.Left) != 0) {
+ m.Left = ordercopyexpr(m.Left, m.Left.Type, order, 0)
+ }
+ if !(istemp(m.Right) != 0) {
+ m.Right = ordercopyexpr(m.Right, m.Right.Type, order, 0)
+ }
+ l.N = ordertemp(m.Type, order, 0)
+ a = Nod(OAS, m, l.N)
+ typecheck(&a, Etop)
+ post = list(post, a)
+ }
+ }
+
+ order.out = list(order.out, n)
+ order.out = concat(order.out, post)
+ }
+}
+
+// Orderstmt orders the statement n, appending to order->out.
+// Temporaries created during the statement are cleaned
+// up using VARKILL instructions where possible.
+func orderstmt(n *Node, order *Order) {
+ var lno int
+ var l *NodeList
+ var t *NodeList
+ var t1 *NodeList
+ var r *Node
+ var tmp1 *Node
+ var tmp2 *Node
+ var np **Node
+ var ch *Type
+ var typ *Type
+
+ if n == nil {
+ return
+ }
+
+ lno = int(setlineno(n))
+
+ orderinit(n, order)
+
+ switch n.Op {
+ default:
+ Fatal("orderstmt %v", Oconv(int(n.Op), 0))
+ fallthrough
+
+ case OVARKILL:
+ order.out = list(order.out, n)
+
+ case OAS,
+ OAS2,
+ OCLOSE,
+ OCOPY,
+ OPRINT,
+ OPRINTN,
+ ORECOVER,
+ ORECV:
+ t = marktemp(order)
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+ orderexprlist(n.List, order)
+ orderexprlist(n.Rlist, order)
+ switch n.Op {
+ case OAS,
+ OAS2,
+ OAS2DOTTYPE:
+ ordermapassign(n, order)
+
+ default:
+ order.out = list(order.out, n)
+ }
+
+ cleantemp(t, order)
+
+ // Special: rewrite l op= r into l = l op r.
+	// This simplifies quite a few operations;
+ // most important is that it lets us separate
+ // out map read from map write when l is
+ // a map index expression.
+ case OASOP:
+ t = marktemp(order)
+
+ orderexpr(&n.Left, order)
+ n.Left = ordersafeexpr(n.Left, order)
+ tmp1 = treecopy(n.Left)
+ if tmp1.Op == OINDEXMAP {
+ tmp1.Etype = 0 // now an rvalue not an lvalue
+ }
+ tmp1 = ordercopyexpr(tmp1, n.Left.Type, order, 0)
+ n.Right = Nod(int(n.Etype), tmp1, n.Right)
+ typecheck(&n.Right, Erv)
+ orderexpr(&n.Right, order)
+ n.Etype = 0
+ n.Op = OAS
+ ordermapassign(n, order)
+ cleantemp(t, order)
+
+ // Special: make sure key is addressable,
+ // and make sure OINDEXMAP is not copied out.
+ case OAS2MAPR:
+ t = marktemp(order)
+
+ orderexprlist(n.List, order)
+ r = n.Rlist.N
+ orderexpr(&r.Left, order)
+ orderexpr(&r.Right, order)
+
+ // See case OINDEXMAP below.
+ if r.Right.Op == OARRAYBYTESTR {
+ r.Right.Op = OARRAYBYTESTRTMP
+ }
+ orderaddrtemp(&r.Right, order)
+ ordermapassign(n, order)
+ cleantemp(t, order)
+
+ // Special: avoid copy of func call n->rlist->n.
+ case OAS2FUNC:
+ t = marktemp(order)
+
+ orderexprlist(n.List, order)
+ ordercall(n.Rlist.N, order)
+ ordermapassign(n, order)
+ cleantemp(t, order)
+
+ // Special: use temporary variables to hold result,
+	// so that assertI2T etc. can take the address of the temporary.
+ // No temporary for blank assignment.
+ case OAS2DOTTYPE:
+ t = marktemp(order)
+
+ orderexprlist(n.List, order)
+ orderexpr(&n.Rlist.N.Left, order) // i in i.(T)
+ if isblank(n.List.N) {
+ order.out = list(order.out, n)
+ } else {
+ typ = n.Rlist.N.Type
+ tmp1 = ordertemp(typ, order, bool2int(haspointers(typ)))
+ order.out = list(order.out, n)
+ r = Nod(OAS, n.List.N, tmp1)
+ typecheck(&r, Etop)
+ ordermapassign(r, order)
+ n.List = list(list1(tmp1), n.List.Next.N)
+ }
+
+ cleantemp(t, order)
+
+ // Special: use temporary variables to hold result,
+ // so that chanrecv can take address of temporary.
+ case OAS2RECV:
+ t = marktemp(order)
+
+ orderexprlist(n.List, order)
+ orderexpr(&n.Rlist.N.Left, order) // arg to recv
+ ch = n.Rlist.N.Left.Type
+ tmp1 = ordertemp(ch.Type, order, bool2int(haspointers(ch.Type)))
+ if !isblank(n.List.Next.N) {
+ tmp2 = ordertemp(n.List.Next.N.Type, order, 0)
+ } else {
+ tmp2 = ordertemp(Types[TBOOL], order, 0)
+ }
+ order.out = list(order.out, n)
+ r = Nod(OAS, n.List.N, tmp1)
+ typecheck(&r, Etop)
+ ordermapassign(r, order)
+ r = Nod(OAS, n.List.Next.N, tmp2)
+ typecheck(&r, Etop)
+ ordermapassign(r, order)
+ n.List = list(list1(tmp1), tmp2)
+ cleantemp(t, order)
+
+ // Special: does not save n onto out.
+ case OBLOCK,
+ OEMPTY:
+ orderstmtlist(n.List, order)
+
+ // Special: n->left is not an expression; save as is.
+ case OBREAK,
+ OCONTINUE,
+ ODCL,
+ ODCLCONST,
+ ODCLTYPE,
+ OFALL,
+ OXFALL,
+ OGOTO,
+ OLABEL,
+ ORETJMP:
+ order.out = list(order.out, n)
+
+ // Special: handle call arguments.
+ case OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH:
+ t = marktemp(order)
+
+ ordercall(n, order)
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ // Special: order arguments to inner call but not call itself.
+ case ODEFER,
+ OPROC:
+ t = marktemp(order)
+
+ switch n.Left.Op {
+ // Delete will take the address of the key.
+ // Copy key into new temp and do not clean it
+ // (it persists beyond the statement).
+ case ODELETE:
+ orderexprlist(n.Left.List, order)
+
+ t1 = marktemp(order)
+ np = &n.Left.List.Next.N // map key
+ *np = ordercopyexpr(*np, (*np).Type, order, 0)
+ poptemp(t1, order)
+
+ default:
+ ordercall(n.Left, order)
+ }
+
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ case ODELETE:
+ t = marktemp(order)
+ orderexpr(&n.List.N, order)
+ orderexpr(&n.List.Next.N, order)
+ orderaddrtemp(&n.List.Next.N, order) // map key
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ // Clean temporaries from condition evaluation at
+ // beginning of loop body and after for statement.
+ case OFOR:
+ t = marktemp(order)
+
+ orderexprinplace(&n.Ntest, order)
+ l = nil
+ cleantempnopop(t, order, &l)
+ n.Nbody = concat(l, n.Nbody)
+ orderblock(&n.Nbody)
+ orderstmtinplace(&n.Nincr)
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ // Clean temporaries from condition at
+ // beginning of both branches.
+ case OIF:
+ t = marktemp(order)
+
+ orderexprinplace(&n.Ntest, order)
+ l = nil
+ cleantempnopop(t, order, &l)
+ n.Nbody = concat(l, n.Nbody)
+ l = nil
+ cleantempnopop(t, order, &l)
+ n.Nelse = concat(l, n.Nelse)
+ poptemp(t, order)
+ orderblock(&n.Nbody)
+ orderblock(&n.Nelse)
+ order.out = list(order.out, n)
+
+ // Special: argument will be converted to interface using convT2E
+ // so make sure it is an addressable temporary.
+ case OPANIC:
+ t = marktemp(order)
+
+ orderexpr(&n.Left, order)
+ if !(Isinter(n.Left.Type) != 0) {
+ orderaddrtemp(&n.Left, order)
+ }
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ // n->right is the expression being ranged over.
+ // order it, and then make a copy if we need one.
+ // We almost always do, to ensure that we don't
+ // see any value changes made during the loop.
+ // Usually the copy is cheap (e.g., array pointer, chan, slice, string are all tiny).
+ // The exception is ranging over an array value (not a slice, not a pointer to array),
+ // which must make a copy to avoid seeing updates made during
+ // the range body. Ranging over an array value is uncommon though.
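+ // Example (a sketch): in "for i, x := range a" where a is a [4]int
+ // value, a is copied into a temporary so that writes to a inside the
+ // body cannot change the iteration; "for i := range a" needs only
+ // len(a), so no copy is made there.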
+ case ORANGE:
+ t = marktemp(order)
+
+ orderexpr(&n.Right, order)
+ switch n.Type.Etype {
+ default:
+ Fatal("orderstmt range %v", Tconv(n.Type, 0))
+ fallthrough
+
+ // Mark []byte(str) range expression to reuse string backing storage.
+ // It is safe because the storage cannot be mutated.
+ case TARRAY:
+ if n.Right.Op == OSTRARRAYBYTE {
+ n.Right.Op = OSTRARRAYBYTETMP
+ }
+ if count(n.List) < 2 || isblank(n.List.Next.N) {
+ // for i := range x will only use x once, to compute len(x).
+ // No need to copy it.
+ break
+ }
+ fallthrough
+
+ // chan, string, slice, and array ranges use the value multiple times,
+ // so make a copy.
+ case TCHAN,
+ TSTRING:
+ r = n.Right
+
+ if r.Type.Etype == TSTRING && r.Type != Types[TSTRING] {
+ r = Nod(OCONV, r, nil)
+ r.Type = Types[TSTRING]
+ typecheck(&r, Erv)
+ }
+
+ n.Right = ordercopyexpr(r, r.Type, order, 0)
+
+ // copy the map value in case it is a map literal.
+ // TODO(rsc): Make tmp = literal expressions reuse tmp.
+ // For maps tmp is just one word so it hardly matters.
+ case TMAP:
+ r = n.Right
+
+ n.Right = ordercopyexpr(r, r.Type, order, 0)
+
+ // n->alloc is the temp for the iterator.
+ n.Alloc = ordertemp(Types[TUINT8], order, 1)
+ }
+
+ for l = n.List; l != nil; l = l.Next {
+ orderexprinplace(&l.N, order)
+ }
+ orderblock(&n.Nbody)
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ case ORETURN:
+ ordercallargs(&n.List, order)
+ order.out = list(order.out, n)
+
+ // Special: clean case temporaries in each block entry.
+ // Select must enter one of its blocks, so there is no
+ // need for a cleaning at the end.
+ // Doubly special: evaluation order for select is stricter
+ // than ordinary expressions. Even something like p.c
+ // has to be hoisted into a temporary, so that it cannot be
+ // reordered after the channel evaluation for a different
+ // case (if p were nil, then the timing of the fault would
+ // give this away).
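+ // For example (an illustrative sketch):
+ //	select {
+ //	case <-p.c:
+ //	case <-q.c:
+ //	}
+ // must copy p.c into a temporary before evaluating q.c, so that a
+ // nil p faults at a fixed point in the evaluation order.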
+ case OSELECT:
+ t = marktemp(order)
+
+ for l = n.List; l != nil; l = l.Next {
+ if l.N.Op != OXCASE {
+ Fatal("order select case %v", Oconv(int(l.N.Op), 0))
+ }
+ r = l.N.Left
+ setlineno(l.N)
+
+ // Append any new body prologue to ninit.
+ // The next loop will insert ninit into nbody.
+ if l.N.Ninit != nil {
+ Fatal("order select ninit")
+ }
+ if r != nil {
+ switch r.Op {
+ default:
+ Yyerror("unknown op in select %v", Oconv(int(r.Op), 0))
+ Dump("select case", r)
+
+ // If this is case x := <-ch or case x, y := <-ch, the case has
+ // the ODCL nodes to declare x and y. We want to delay that
+ // declaration (and possible allocation) until inside the case body.
+ // Delete the ODCL nodes here and recreate them inside the body below.
+ case OSELRECV,
+ OSELRECV2:
+ if r.Colas != 0 {
+ // Use a fresh variable here: t holds the marktemp marker
+ // that the cleanup loop below still needs.
+ init := r.Ninit
+ if init != nil && init.N.Op == ODCL && init.N.Left == r.Left {
+ init = init.Next
+ }
+ if init != nil && init.N.Op == ODCL && init.N.Left == r.Ntest {
+ init = init.Next
+ }
+ if init == nil {
+ r.Ninit = nil
+ }
+ }
+
+ if r.Ninit != nil {
+ Yyerror("ninit on select recv")
+ dumplist("ninit", r.Ninit)
+ }
+
+ // case x = <-c
+ // case x, ok = <-c
+ // r->left is x, r->ntest is ok, r->right is ORECV, r->right->left is c.
+ // r->left == N means 'case <-c'.
+ // c is always evaluated; x and ok are only evaluated when assigned.
+ orderexpr(&r.Right.Left, order)
+
+ if r.Right.Left.Op != ONAME {
+ r.Right.Left = ordercopyexpr(r.Right.Left, r.Right.Left.Type, order, 0)
+ }
+
+ // Introduce temporary for receive and move actual copy into case body.
+ // This avoids problems with the target being addressed, as usual.
+ // NOTE: If we wanted to be clever, we could arrange for just one
+ // temporary per distinct type, sharing the temp among all receives
+ // with that temp. Similarly one ok bool could be shared among all
+ // the x,ok receives. Not worth doing until there's a clear need.
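+ // Sketch of the rewrite (illustrative): "case x = <-c" becomes
+ // "case tmp = <-c" with "x = tmp" prepended to the case body,
+ // where tmp has c's element type.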
+ if r.Left != nil && isblank(r.Left) {
+ r.Left = nil
+ }
+ if r.Left != nil {
+ // use channel element type for temporary to avoid conversions,
+ // such as in case interfacevalue = <-intchan.
+ // the conversion happens in the OAS instead.
+ tmp1 = r.Left
+
+ if r.Colas != 0 {
+ tmp2 = Nod(ODCL, tmp1, nil)
+ typecheck(&tmp2, Etop)
+ l.N.Ninit = list(l.N.Ninit, tmp2)
+ }
+
+ r.Left = ordertemp(r.Right.Left.Type.Type, order, bool2int(haspointers(r.Right.Left.Type.Type)))
+ tmp2 = Nod(OAS, tmp1, r.Left)
+ typecheck(&tmp2, Etop)
+ l.N.Ninit = list(l.N.Ninit, tmp2)
+ }
+
+ if r.Ntest != nil && isblank(r.Ntest) {
+ r.Ntest = nil
+ }
+ if r.Ntest != nil {
+ tmp1 = r.Ntest
+ if r.Colas != 0 {
+ tmp2 = Nod(ODCL, tmp1, nil)
+ typecheck(&tmp2, Etop)
+ l.N.Ninit = list(l.N.Ninit, tmp2)
+ }
+
+ r.Ntest = ordertemp(tmp1.Type, order, 0)
+ tmp2 = Nod(OAS, tmp1, r.Ntest)
+ typecheck(&tmp2, Etop)
+ l.N.Ninit = list(l.N.Ninit, tmp2)
+ }
+
+ orderblock(&l.N.Ninit)
+
+ case OSEND:
+ if r.Ninit != nil {
+ Yyerror("ninit on select send")
+ dumplist("ninit", r.Ninit)
+ }
+
+ // case c <- x
+ // r->left is c, r->right is x, both are always evaluated.
+ orderexpr(&r.Left, order)
+
+ if !(istemp(r.Left) != 0) {
+ r.Left = ordercopyexpr(r.Left, r.Left.Type, order, 0)
+ }
+ orderexpr(&r.Right, order)
+ if !(istemp(r.Right) != 0) {
+ r.Right = ordercopyexpr(r.Right, r.Right.Type, order, 0)
+ }
+ }
+ }
+
+ orderblock(&l.N.Nbody)
+ }
+
+ // Now that we have accumulated all the temporaries, clean them.
+ // Also insert any ninit queued during the previous loop.
+ // (The temporary cleaning must follow that ninit work.)
+ for l = n.List; l != nil; l = l.Next {
+ cleantempnopop(t, order, &l.N.Ninit)
+ l.N.Nbody = concat(l.N.Ninit, l.N.Nbody)
+ l.N.Ninit = nil
+ }
+
+ order.out = list(order.out, n)
+ poptemp(t, order)
+
+ // Special: value being sent is passed as a pointer; make it addressable.
+ case OSEND:
+ t = marktemp(order)
+
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+ orderaddrtemp(&n.Right, order)
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ // TODO(rsc): Clean temporaries more aggressively.
+ // Note that because walkswitch will rewrite some of the
+ // switch into a binary search, this is not as easy as it looks.
+ // (If we ran that code here we could invoke orderstmt on
+ // the if-else chain instead.)
+ // For now just clean all the temporaries at the end.
+ // In practice that's fine.
+ case OSWITCH:
+ t = marktemp(order)
+
+ orderexpr(&n.Ntest, order)
+ for l = n.List; l != nil; l = l.Next {
+ if l.N.Op != OXCASE {
+ Fatal("order switch case %v", Oconv(int(l.N.Op), 0))
+ }
+ orderexprlistinplace(l.N.List, order)
+ orderblock(&l.N.Nbody)
+ }
+
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+ }
+
+ lineno = int32(lno)
+}
+
+// Orderexprlist orders the expression list l into order.
+func orderexprlist(l *NodeList, order *Order) {
+ for ; l != nil; l = l.Next {
+ orderexpr(&l.N, order)
+ }
+}
+
+// Orderexprlistinplace orders the expression list l but saves
+// the side effects on the individual expression ninit lists.
+func orderexprlistinplace(l *NodeList, order *Order) {
+ for ; l != nil; l = l.Next {
+ orderexprinplace(&l.N, order)
+ }
+}
+
+// Orderexpr orders a single expression, appending side
+// effects to order->out as needed.
+func orderexpr(np **Node, order *Order) {
+ var n *Node
+ var mark *NodeList
+ var l *NodeList
+ var t *Type
+ var lno int
+ var haslit int
+ var hasbyte int
+
+ n = *np
+ if n == nil {
+ return
+ }
+
+ lno = int(setlineno(n))
+ orderinit(n, order)
+
+ switch n.Op {
+ default:
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+ orderexprlist(n.List, order)
+ orderexprlist(n.Rlist, order)
+
+ // Addition of strings turns into a function call.
+ // Allocate a temporary to hold the strings.
+ // Up to five strings use direct runtime helpers.
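+ // For instance (a sketch): s = a + b + c + d + e + f has six
+ // operands, so the [6]string temporary allocated here backs the
+ // slice handed to the generic concatenation helper; five or fewer
+ // operands go to fixed-arity helpers instead.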
+ case OADDSTR:
+ orderexprlist(n.List, order)
+
+ if count(n.List) > 5 {
+ t = typ(TARRAY)
+ t.Bound = int64(count(n.List))
+ t.Type = Types[TSTRING]
+ n.Alloc = ordertemp(t, order, 0)
+ }
+
+ // Mark string(byteSlice) arguments to reuse byteSlice backing
+ // buffer during conversion. String concatenation does not
+ // retain the strings for later use, so this is safe.
+ // However, we can do it only if there is at least one non-empty string literal.
+ // Otherwise, if all the other arguments are empty strings,
+ // concatstrings would return a reference to the temporary string
+ // to the caller.
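+ // Sketch (illustrative): in s := string(b) + "!", the string(b)
+ // conversion may alias b's bytes, because the concatenation copies
+ // them into the freshly allocated result before returning.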
+ hasbyte = 0
+
+ haslit = 0
+ for l = n.List; l != nil; l = l.Next {
+ hasbyte |= bool2int(l.N.Op == OARRAYBYTESTR)
+ haslit |= bool2int(l.N.Op == OLITERAL && len(l.N.Val.U.Sval.S) != 0)
+ }
+
+ if haslit != 0 && hasbyte != 0 {
+ for l = n.List; l != nil; l = l.Next {
+ if l.N.Op == OARRAYBYTESTR {
+ l.N.Op = OARRAYBYTESTRTMP
+ }
+ }
+ }
+
+ case OCMPSTR:
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+
+ // Mark string(byteSlice) arguments to reuse byteSlice backing
+ // buffer during conversion. String comparison does not
+ // retain the strings for later use, so this is safe.
+ if n.Left.Op == OARRAYBYTESTR {
+ n.Left.Op = OARRAYBYTESTRTMP
+ }
+ if n.Right.Op == OARRAYBYTESTR {
+ n.Right.Op = OARRAYBYTESTRTMP
+ }
+
+ // key must be addressable
+ case OINDEXMAP:
+ orderexpr(&n.Left, order)
+
+ orderexpr(&n.Right, order)
+
+ // For x = m[string(k)] where k is []byte, the allocation of
+ // backing bytes for the string can be avoided by reusing
+ // the []byte backing array. This is a special case that it
+ // would be nice to handle more generally, but because
+ // there are no []byte-keyed maps, this specific case comes
+ // up in important cases in practice. See issue 3512.
+ // Nothing can change the []byte we are not copying before
+ // the map index, because the map access is going to
+ // be forced to happen immediately following this
+ // conversion (by the ordercopyexpr a few lines below).
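+ // Sketch (an assumed example): with k []byte, v := m[string(k)]
+ // can key the lookup with a temporary string aliasing k's bytes,
+ // since the lookup finishes before k can be modified.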
+ if n.Etype == 0 && n.Right.Op == OARRAYBYTESTR {
+ n.Right.Op = OARRAYBYTESTRTMP
+ }
+
+ orderaddrtemp(&n.Right, order)
+ if n.Etype == 0 {
+ // use of value (not being assigned);
+ // make copy in temporary.
+ n = ordercopyexpr(n, n.Type, order, 0)
+ }
+
+ // concrete type (not interface) argument must be addressable
+ // temporary to pass to runtime.
+ case OCONVIFACE:
+ orderexpr(&n.Left, order)
+
+ if !(Isinter(n.Left.Type) != 0) {
+ orderaddrtemp(&n.Left, order)
+ }
+
+ case OANDAND,
+ OOROR:
+ mark = marktemp(order)
+ orderexpr(&n.Left, order)
+
+ // Clean temporaries from first branch at beginning of second.
+ // Leave them on the stack so that they can be killed in the outer
+ // context in case the short circuit is taken.
+ l = nil
+
+ cleantempnopop(mark, order, &l)
+ n.Right.Ninit = concat(l, n.Right.Ninit)
+ orderexprinplace(&n.Right, order)
+
+ case OAPPEND,
+ OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH,
+ OCAP,
+ OCOMPLEX,
+ OCOPY,
+ OIMAG,
+ OLEN,
+ OMAKECHAN,
+ OMAKEMAP,
+ OMAKESLICE,
+ ONEW,
+ OREAL,
+ ORECOVER:
+ ordercall(n, order)
+ n = ordercopyexpr(n, n.Type, order, 0)
+
+ case OCLOSURE:
+ if n.Noescape && n.Cvars != nil {
+ n.Alloc = ordertemp(Types[TUINT8], order, 0) // walk will fill in correct type
+ }
+
+ case OARRAYLIT,
+ OCALLPART:
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+ orderexprlist(n.List, order)
+ orderexprlist(n.Rlist, order)
+ if n.Noescape {
+ n.Alloc = ordertemp(Types[TUINT8], order, 0) // walk will fill in correct type
+ }
+
+ case ODDDARG:
+ if n.Noescape {
+ // The ddd argument does not live beyond the call it is created for.
+ // Allocate a temporary that will be cleaned up when this statement
+ // completes. We could be more aggressive and try to arrange for it
+ // to be cleaned up when the call completes.
+ n.Alloc = ordertemp(n.Type.Type, order, 0)
+ }
+
+ case ORECV,
+ ODOTTYPE:
+ orderexpr(&n.Left, order)
+ n = ordercopyexpr(n, n.Type, order, 1)
+
+ case OEQ,
+ ONE:
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+ t = n.Left.Type
+ if t.Etype == TSTRUCT || Isfixedarray(t) != 0 {
+ // for complex comparisons, we need both args to be
+ // addressable so we can pass them to the runtime.
+ orderaddrtemp(&n.Left, order)
+
+ orderaddrtemp(&n.Right, order)
+ }
+ }
+
+ lineno = int32(lno)
+
+ *np = n
+}
diff --git a/src/cmd/internal/gc/pgen.go b/src/cmd/internal/gc/pgen.go
new file mode 100644
index 0000000000..052d7dc60b
--- /dev/null
+++ b/src/cmd/internal/gc/pgen.go
@@ -0,0 +1,597 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+)
+
+// "Portable" code generation.
+// Compiled separately for 5g, 6g, and 8g, so allowed to use gg.h, opt.h.
+// Must code to the intersection of the three back ends.
+
+//#include "opt.h"
+
+var makefuncdatasym_nsym int32
+
+func makefuncdatasym(namefmt string, funcdatakind int64) *Sym {
+ var nod Node
+ var pnod *Node
+ var sym *Sym
+
+ namebuf = fmt.Sprintf(namefmt, makefuncdatasym_nsym)
+ makefuncdatasym_nsym++
+ sym = Lookup(namebuf)
+ pnod = newname(sym)
+ pnod.Class = PEXTERN
+ Nodconst(&nod, Types[TINT32], funcdatakind)
+ Thearch.Gins(obj.AFUNCDATA, &nod, pnod)
+ return sym
+}
+
+// gvardef inserts a VARDEF for n into the instruction stream.
+// VARDEF is an annotation for the liveness analysis, marking a place
+// where a complete initialization (definition) of a variable begins.
+// Since the liveness analysis can see initialization of single-word
+// variables quite easily, gvardef is usually only called for multi-word
+// or 'fat' variables, those satisfying isfat(n->type).
+// However, gvardef is also called when a non-fat variable is initialized
+// via a block move; the only time this happens is when you have
+// return f()
+// for a function with multiple return values exactly matching the return
+// types of the current function.
+//
+// A 'VARDEF x' annotation in the instruction stream tells the liveness
+// analysis to behave as though the variable x is being initialized at that
+// point in the instruction stream. The VARDEF must appear before the
+// actual (multi-instruction) initialization, and it must also appear after
+// any uses of the previous value, if any. For example, if compiling:
+//
+// x = x[1:]
+//
+// it is important to generate code like:
+//
+// base, len, cap = pieces of x[1:]
+// VARDEF x
+// x = {base, len, cap}
+//
+// If instead the generated code looked like:
+//
+// VARDEF x
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+//
+// then the liveness analysis would decide the previous value of x was
+// unnecessary even though it is about to be used by the x[1:] computation.
+// Similarly, if the generated code looked like:
+//
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+// VARDEF x
+//
+// then the liveness analysis will not preserve the new value of x, because
+// the VARDEF appears to have "overwritten" it.
+//
+// VARDEF is a bit of a kludge to work around the fact that the instruction
+// stream is working on single-word values but the liveness analysis
+// wants to work on individual variables, which might be multi-word
+// aggregates. It might make sense at some point to look into letting
+// the liveness analysis work on single-word values as well, although
+// there are complications around interface values, slices, and strings,
+// all of which cannot be treated as individual words.
+//
+// VARKILL is the opposite of VARDEF: it marks a value as no longer needed,
+// even if its address has been taken. That is, a VARKILL annotation asserts
+// that its argument is certainly dead, for use when the liveness analysis
+// would not otherwise be able to deduce that fact.
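+//
+// A sketch of a VARKILL use (illustrative, not from this file): given
+//
+//	var buf [64]byte
+//	f(&buf)
+//	// buf is not used again
+//
+// emitting 'VARKILL buf' after the call tells the liveness analysis that
+// buf is dead from then on, which taking &buf would otherwise hide.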
+
+func gvardefx(n *Node, as int) {
+ if n == nil {
+ Fatal("gvardef nil")
+ }
+ if n.Op != ONAME {
+ Yyerror("gvardef %v; %v", Oconv(int(n.Op), obj.FmtSharp), Nconv(n, 0))
+ return
+ }
+
+ switch n.Class {
+ case PAUTO,
+ PPARAM,
+ PPARAMOUT:
+ Thearch.Gins(as, nil, n)
+ }
+}
+
+func Gvardef(n *Node) {
+ gvardefx(n, obj.AVARDEF)
+}
+
+func gvarkill(n *Node) {
+ gvardefx(n, obj.AVARKILL)
+}
+
+func removevardef(firstp *obj.Prog) {
+ var p *obj.Prog
+
+ for p = firstp; p != nil; p = p.Link {
+ for p.Link != nil && (p.Link.As == obj.AVARDEF || p.Link.As == obj.AVARKILL) {
+ p.Link = p.Link.Link
+ }
+ if p.To.Type == obj.TYPE_BRANCH {
+ for p.To.U.Branch != nil && (p.To.U.Branch.As == obj.AVARDEF || p.To.U.Branch.As == obj.AVARKILL) {
+ p.To.U.Branch = p.To.U.Branch.Link
+ }
+ }
+ }
+}
+
+func gcsymdup(s *Sym) {
+ var ls *obj.LSym
+ var lo uint64
+ var hi uint64
+
+ ls = Linksym(s)
+ if len(ls.R) > 0 {
+ Fatal("cannot rosymdup %s with relocations", ls.Name)
+ }
+ var d MD5
+ md5reset(&d)
+ md5write(&d, ls.P, len(ls.P))
+ lo = md5sum(&d, &hi)
+ ls.Name = fmt.Sprintf("gclocals·%016x%016x", lo, hi)
+ ls.Dupok = 1
+}
+
+func emitptrargsmap() {
+ var nptr int
+ var nbitmap int
+ var j int
+ var off int
+ var xoffset int64
+ var bv *Bvec
+ var sym *Sym
+
+ sym = Lookup(fmt.Sprintf("%s.args_stackmap", Curfn.Nname.Sym.Name))
+
+ nptr = int(Curfn.Type.Argwid / int64(Widthptr))
+ bv = bvalloc(int32(nptr) * 2)
+ nbitmap = 1
+ if Curfn.Type.Outtuple > 0 {
+ nbitmap = 2
+ }
+ off = duint32(sym, 0, uint32(nbitmap))
+ off = duint32(sym, off, uint32(bv.n))
+ if Curfn.Type.Thistuple > 0 {
+ xoffset = 0
+ twobitwalktype1(getthisx(Curfn.Type), &xoffset, bv)
+ }
+
+ if Curfn.Type.Intuple > 0 {
+ xoffset = 0
+ twobitwalktype1(getinargx(Curfn.Type), &xoffset, bv)
+ }
+
+ for j = 0; int32(j) < bv.n; j += 32 {
+ off = duint32(sym, off, bv.b[j/32])
+ }
+ if Curfn.Type.Outtuple > 0 {
+ xoffset = 0
+ twobitwalktype1(getoutargx(Curfn.Type), &xoffset, bv)
+ for j = 0; int32(j) < bv.n; j += 32 {
+ off = duint32(sym, off, bv.b[j/32])
+ }
+ }
+
+ ggloblsym(sym, int32(off), obj.RODATA)
+}
+
+// Sort the list of stack variables. Autos after anything else,
+// within autos, unused after used, within used, things with
+// pointers first, zeroed things first, and then decreasing size.
+// Because autos are laid out in decreasing addresses
+// on the stack, pointers first, zeroed things first and decreasing size
+// really means, in memory, things with pointers needing zeroing at
+// the top of the stack and increasing in size.
+// Non-autos sort on offset.
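+//
+// For example (a hedged reading of the rules below): two non-autos compare
+// by Xoffset alone; among used autos, one whose type has pointers sorts
+// before one without, a needzero auto sorts before one that needs no
+// zeroing, and a 16-byte auto sorts before an 8-byte one.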
+func cmpstackvar(a *Node, b *Node) int {
+ var ap int
+ var bp int
+
+ if a.Class != b.Class {
+ if a.Class == PAUTO {
+ return +1
+ }
+ return -1
+ }
+
+ if a.Class != PAUTO {
+ if a.Xoffset < b.Xoffset {
+ return -1
+ }
+ if a.Xoffset > b.Xoffset {
+ return +1
+ }
+ return 0
+ }
+
+ if (a.Used == 0) != (b.Used == 0) {
+ return int(b.Used) - int(a.Used)
+ }
+
+ ap = bool2int(haspointers(a.Type))
+ bp = bool2int(haspointers(b.Type))
+ if ap != bp {
+ return bp - ap
+ }
+
+ ap = int(a.Needzero)
+ bp = int(b.Needzero)
+ if ap != bp {
+ return bp - ap
+ }
+
+ if a.Type.Width < b.Type.Width {
+ return +1
+ }
+ if a.Type.Width > b.Type.Width {
+ return -1
+ }
+
+ return stringsCompare(a.Sym.Name, b.Sym.Name)
+}
+
+// TODO(lvd) find out where the PAUTO/OLITERAL nodes come from.
+func allocauto(ptxt *obj.Prog) {
+ var ll *NodeList
+ var n *Node
+ var w int64
+
+ Stksize = 0
+ stkptrsize = 0
+
+ if Curfn.Dcl == nil {
+ return
+ }
+
+ // Mark the PAUTOs unused.
+ for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ if ll.N.Class == PAUTO {
+ ll.N.Used = 0
+ }
+ }
+
+ markautoused(ptxt)
+
+ listsort(&Curfn.Dcl, cmpstackvar)
+
+ // Unused autos are at the end, chop 'em off.
+ ll = Curfn.Dcl
+
+ n = ll.N
+ if n.Class == PAUTO && n.Op == ONAME && !(n.Used != 0) {
+ // No locals used at all
+ Curfn.Dcl = nil
+
+ fixautoused(ptxt)
+ return
+ }
+
+ for ll = Curfn.Dcl; ll.Next != nil; ll = ll.Next {
+ n = ll.Next.N
+ if n.Class == PAUTO && n.Op == ONAME && !(n.Used != 0) {
+ ll.Next = nil
+ Curfn.Dcl.End = ll
+ break
+ }
+ }
+
+ // Reassign stack offsets of the locals that are still there.
+ for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ n = ll.N
+ if n.Class != PAUTO || n.Op != ONAME {
+ continue
+ }
+
+ dowidth(n.Type)
+ w = n.Type.Width
+ if w >= Thearch.MAXWIDTH || w < 0 {
+ Fatal("bad width")
+ }
+ Stksize += w
+ Stksize = Rnd(Stksize, int64(n.Type.Align))
+ if haspointers(n.Type) {
+ stkptrsize = Stksize
+ }
+ if Thearch.Thechar == '5' || Thearch.Thechar == '9' {
+ Stksize = Rnd(Stksize, int64(Widthptr))
+ }
+ if Stksize >= 1<<31 {
+ setlineno(Curfn)
+ Yyerror("stack frame too large (>2GB)")
+ }
+
+ n.Stkdelta = -Stksize - n.Xoffset
+ }
+
+ Stksize = Rnd(Stksize, int64(Widthreg))
+ stkptrsize = Rnd(stkptrsize, int64(Widthreg))
+
+ fixautoused(ptxt)
+
+ // The debug information needs accurate offsets on the symbols.
+ for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ if ll.N.Class != PAUTO || ll.N.Op != ONAME {
+ continue
+ }
+ ll.N.Xoffset += ll.N.Stkdelta
+ ll.N.Stkdelta = 0
+ }
+}
+
+func movelarge(l *NodeList) {
+ for ; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC {
+ movelargefn(l.N)
+ }
+ }
+}
+
+func movelargefn(fn *Node) {
+ var l *NodeList
+ var n *Node
+
+ for l = fn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if n.Class == PAUTO && n.Type != nil && n.Type.Width > MaxStackVarSize {
+ addrescapes(n)
+ }
+ }
+}
+
+func Cgen_checknil(n *Node) {
+ var reg Node
+
+ if Disable_checknil != 0 {
+ return
+ }
+
+ // Ideally we wouldn't see any integer types here, but we do.
+ if n.Type == nil || (!(Isptr[n.Type.Etype] != 0) && !(Isint[n.Type.Etype] != 0) && n.Type.Etype != TUNSAFEPTR) {
+ Dump("checknil", n)
+ Fatal("bad checknil")
+ }
+
+ if ((Thearch.Thechar == '5' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !(n.Addable != 0) || n.Op == OLITERAL {
+ Thearch.Regalloc(&reg, Types[Tptr], n)
+ Thearch.Cgen(n, &reg)
+ Thearch.Gins(obj.ACHECKNIL, &reg, nil)
+ Thearch.Regfree(&reg)
+ return
+ }
+
+ Thearch.Gins(obj.ACHECKNIL, n, nil)
+}
+
+/*
+ * ggen.c
+ */
+func compile(fn *Node) {
+ var pl *obj.Plist
+ var nod1 Node
+ var n *Node
+ var ptxt *obj.Prog
+ var p *obj.Prog
+ var lno int32
+ var t *Type
+ var save Iter
+ var oldstksize int64
+ var l *NodeList
+ var nam *Node
+ var gcargs *Sym
+ var gclocals *Sym
+
+ if Newproc == nil {
+ Newproc = Sysfunc("newproc")
+ Deferproc = Sysfunc("deferproc")
+ Deferreturn = Sysfunc("deferreturn")
+ Panicindex = Sysfunc("panicindex")
+ panicslice = Sysfunc("panicslice")
+ throwreturn = Sysfunc("throwreturn")
+ }
+
+ lno = setlineno(fn)
+
+ Curfn = fn
+ dowidth(Curfn.Type)
+
+ if fn.Nbody == nil {
+ if pure_go != 0 || strings.HasPrefix(fn.Nname.Sym.Name, "init·") {
+ Yyerror("missing function body", fn)
+ goto ret
+ }
+
+ if Debug['A'] != 0 {
+ goto ret
+ }
+ emitptrargsmap()
+ goto ret
+ }
+
+ saveerrors()
+
+ // set up domain for labels
+ clearlabels()
+
+ if Curfn.Type.Outnamed != 0 {
+ // add clearing of the output parameters
+ t = Structfirst(&save, Getoutarg(Curfn.Type))
+
+ for t != nil {
+ if t.Nname != nil {
+ n = Nod(OAS, t.Nname, nil)
+ typecheck(&n, Etop)
+ Curfn.Nbody = concat(list1(n), Curfn.Nbody)
+ }
+
+ t = structnext(&save)
+ }
+ }
+
+ order(Curfn)
+ if nerrors != 0 {
+ goto ret
+ }
+
+ Hasdefer = 0
+ walk(Curfn)
+ if nerrors != 0 {
+ goto ret
+ }
+ if flag_race != 0 {
+ racewalk(Curfn)
+ }
+ if nerrors != 0 {
+ goto ret
+ }
+
+ continpc = nil
+ breakpc = nil
+
+ pl = newplist()
+ pl.Name = Linksym(Curfn.Nname.Sym)
+
+ setlineno(Curfn)
+
+ Nodconst(&nod1, Types[TINT32], 0)
+ nam = Curfn.Nname
+ if isblank(nam) {
+ nam = nil
+ }
+ ptxt = Thearch.Gins(obj.ATEXT, nam, &nod1)
+ if fn.Dupok != 0 {
+ ptxt.From3.Offset |= obj.DUPOK
+ }
+ if fn.Wrapper != 0 {
+ ptxt.From3.Offset |= obj.WRAPPER
+ }
+ if fn.Needctxt != 0 {
+ ptxt.From3.Offset |= obj.NEEDCTXT
+ }
+ if fn.Nosplit {
+ ptxt.From3.Offset |= obj.NOSPLIT
+ }
+
+ // Clumsy but important.
+ // See test/recover.go for test cases and src/reflect/value.go
+ // for the actual functions being considered.
+ if myimportpath != "" && myimportpath == "reflect" {
+ if Curfn.Nname.Sym.Name == "callReflect" || Curfn.Nname.Sym.Name == "callMethod" {
+ ptxt.From3.Offset |= obj.WRAPPER
+ }
+ }
+
+ Afunclit(&ptxt.From, Curfn.Nname)
+
+ Thearch.Ginit()
+
+ gcargs = makefuncdatasym("gcargs·%d", obj.FUNCDATA_ArgsPointerMaps)
+ gclocals = makefuncdatasym("gclocals·%d", obj.FUNCDATA_LocalsPointerMaps)
+
+ for t = Curfn.Paramfld; t != nil; t = t.Down {
+ gtrack(tracksym(t.Type))
+ }
+
+ for l = fn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if n.Op != ONAME { // might be OTYPE or OLITERAL
+ continue
+ }
+ switch n.Class {
+ case PAUTO,
+ PPARAM,
+ PPARAMOUT:
+ Nodconst(&nod1, Types[TUINTPTR], l.N.Type.Width)
+ p = Thearch.Gins(obj.ATYPE, l.N, &nod1)
+ p.From.Gotype = Linksym(ngotype(l.N))
+ }
+ }
+
+ Genlist(Curfn.Enter)
+ Genlist(Curfn.Nbody)
+ Thearch.Gclean()
+ checklabels()
+ if nerrors != 0 {
+ goto ret
+ }
+ if Curfn.Endlineno != 0 {
+ lineno = Curfn.Endlineno
+ }
+
+ if Curfn.Type.Outtuple != 0 {
+ Thearch.Ginscall(throwreturn, 0)
+ }
+
+ Thearch.Ginit()
+
+ // TODO: Determine when the final cgen_ret can be omitted. Perhaps always?
+ Thearch.Cgen_ret(nil)
+
+ if Hasdefer != 0 {
+ // deferreturn pretends to have one uintptr argument.
+ // Reserve space for it so stack scanner is happy.
+ if Maxarg < int64(Widthptr) {
+ Maxarg = int64(Widthptr)
+ }
+ }
+
+ Thearch.Gclean()
+ if nerrors != 0 {
+ goto ret
+ }
+
+ Pc.As = obj.ARET // overwrite AEND
+ Pc.Lineno = lineno
+
+ fixjmp(ptxt)
+ if !(Debug['N'] != 0) || Debug['R'] != 0 || Debug['P'] != 0 {
+ regopt(ptxt)
+ nilopt(ptxt)
+ }
+
+ Thearch.Expandchecks(ptxt)
+
+ oldstksize = Stksize
+ allocauto(ptxt)
+
+ if false {
+ fmt.Printf("allocauto: %d to %d\n", oldstksize, int64(Stksize))
+ }
+
+ setlineno(Curfn)
+ if int64(Stksize)+Maxarg > 1<<31 {
+ Yyerror("stack frame too large (>2GB)")
+ goto ret
+ }
+
+ // Emit garbage collection symbols.
+ liveness(Curfn, ptxt, gcargs, gclocals)
+
+ gcsymdup(gcargs)
+ gcsymdup(gclocals)
+
+ Thearch.Defframe(ptxt)
+
+ if Debug['f'] != 0 {
+ frame(0)
+ }
+
+ // Remove leftover instrumentation from the instruction stream.
+ removevardef(ptxt)
+
+ret:
+ lineno = lno
+}
diff --git a/src/cmd/internal/gc/plive.go b/src/cmd/internal/gc/plive.go
new file mode 100644
index 0000000000..04173fccf5
--- /dev/null
+++ b/src/cmd/internal/gc/plive.go
@@ -0,0 +1,2018 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "sort"
+)
+
+const (
+ UNVISITED = 0
+ VISITED = 1
+)
+
+// An ordinary basic block.
+//
+// Instructions are threaded together in a doubly-linked list. To iterate in
+// program order, follow the link pointer from the first node and stop after
+// the last node has been visited:
+//
+// for p = bb.first; ; p = p.Link {
+// ...
+// if p == bb.last {
+// break
+// }
+// }
+//
+// To iterate in reverse program order, follow the opt pointer from the
+// last node:
+//
+// for p = bb.last; p != nil; p = p.Opt.(*obj.Prog) {
+// ...
+// }
+type BasicBlock struct {
+ pred []*BasicBlock
+ succ []*BasicBlock
+ first *obj.Prog
+ last *obj.Prog
+ rpo int
+ mark int
+ lastbitmapindex int
+}
+
+// A collection of global state used by liveness analysis.
+type Liveness struct {
+ fn *Node
+ ptxt *obj.Prog
+ vars []*Node
+ cfg []*BasicBlock
+ uevar []*Bvec
+ varkill []*Bvec
+ livein []*Bvec
+ liveout []*Bvec
+ avarinit []*Bvec
+ avarinitany []*Bvec
+ avarinitall []*Bvec
+ argslivepointers []*Bvec
+ livepointers []*Bvec
+}
+
+func xmalloc(size uint32) interface{} {
+ var result interface{}
+
+ result = make([]byte, size)
+ if result == nil {
+ Fatal("malloc failed")
+ }
+ return result
+}
+
+// Constructs a new basic block containing a single instruction.
+func newblock(prog *obj.Prog) *BasicBlock {
+ var result *BasicBlock
+
+ if prog == nil {
+ Fatal("newblock: prog cannot be nil")
+ }
+ result = new(BasicBlock)
+ result.rpo = -1
+ result.mark = UNVISITED
+ result.first = prog
+ result.last = prog
+ result.pred = make([]*BasicBlock, 0, 2)
+ result.succ = make([]*BasicBlock, 0, 2)
+ return result
+}
+
+// Frees a basic block and all of its leaf data structures.
+func freeblock(bb *BasicBlock) {
+ if bb == nil {
+ Fatal("freeblock: cannot free nil")
+ }
+}
+
+// Adds an edge between two basic blocks by making from a predecessor of to and
+// to a successor of from.
+func addedge(from *BasicBlock, to *BasicBlock) {
+ if from == nil {
+ Fatal("addedge: from is nil")
+ }
+ if to == nil {
+ Fatal("addedge: to is nil")
+ }
+ from.succ = append(from.succ, to)
+ to.pred = append(to.pred, from)
+}
+
+// Inserts prev before curr in the instruction
+// stream. Any control flow, such as branches or fall-throughs, that targets
+// the existing instruction is adjusted to target the new instruction.
+func splicebefore(lv *Liveness, bb *BasicBlock, prev *obj.Prog, curr *obj.Prog) {
+ var next *obj.Prog
+ var tmp obj.Prog
+
+ // There may be other instructions pointing at curr,
+ // and we want them to now point at prev. Instead of
+ // trying to find all such instructions, swap the contents
+ // so that the problem becomes inserting next after curr.
+ // The "opt" field is the backward link in the linked list.
+
+ // Overwrite curr's data with prev, but keep the list links.
+ tmp = *curr
+
+ *curr = *prev
+ curr.Opt = tmp.Opt
+ curr.Link = tmp.Link
+
+ // Overwrite prev (now next) with curr's old data.
+ next = prev
+
+ *next = tmp
+ next.Opt = nil
+ next.Link = nil
+
+ // Now insert next after curr.
+ next.Link = curr.Link
+
+ next.Opt = curr
+ curr.Link = next
+ if next.Link != nil && next.Link.Opt == curr {
+ next.Link.Opt = next
+ }
+
+ if bb.last == curr {
+ bb.last = next
+ }
+}
+
+// A pretty printer for basic blocks.
+func printblock(bb *BasicBlock) {
+ var pred *BasicBlock
+ var succ *BasicBlock
+ var prog *obj.Prog
+ var i int
+
+ fmt.Printf("basic block %d\n", bb.rpo)
+ fmt.Printf("\tpred:")
+ for i = 0; i < len(bb.pred); i++ {
+ pred = bb.pred[i]
+ fmt.Printf(" %d", pred.rpo)
+ }
+
+ fmt.Printf("\n")
+ fmt.Printf("\tsucc:")
+ for i = 0; i < len(bb.succ); i++ {
+ succ = bb.succ[i]
+ fmt.Printf(" %d", succ.rpo)
+ }
+
+ fmt.Printf("\n")
+ fmt.Printf("\tprog:\n")
+ for prog = bb.first; ; prog = prog.Link {
+ fmt.Printf("\t\t%v\n", prog)
+ if prog == bb.last {
+ break
+ }
+ }
+}
+
+// Iterates over a basic block applying a callback to each instruction. There
+// are two criteria for termination. If the end of the basic block is
+// reached, zero is returned. If the callback returns a non-zero value, the
+// iteration is stopped and the value of the callback is returned.
+func blockany(bb *BasicBlock, callback func(*obj.Prog) int) int {
+ var p *obj.Prog
+ var result int
+
+ for p = bb.last; p != nil; p = p.Opt.(*obj.Prog) {
+ result = callback(p)
+ if result != 0 {
+ return result
+ }
+ }
+
+ return 0
+}
+
+// Collects and returns an array of Node*s for function arguments and local
+// variables.
+func getvariables(fn *Node) []*Node {
+ var result []*Node
+ var ll *NodeList
+
+ result = make([]*Node, 0, 0)
+ for ll = fn.Dcl; ll != nil; ll = ll.Next {
+ if ll.N.Op == ONAME {
+ // In order for GODEBUG=gcdead=1 to work, each bitmap needs
+ // to contain information about all variables covered by the bitmap.
+ // For local variables, the bitmap only covers the stkptrsize
+ // bytes in the frame where variables containing pointers live.
+ // For arguments and results, the bitmap covers all variables,
+ // so we must include all the variables, even the ones without
+ // pointers.
+ //
+ // The Node.opt field is available for use by optimization passes.
+ // We use it to hold the index of the node in the variables array
+ // (a nil opt means the Node is not in the variables array).
+ // Each pass should clear opt when done, but you never know,
+ // so clear them all ourselves too.
+ // The Node.curfn field is supposed to be set to the current function
+ // already, but for some compiler-introduced names it seems not to be,
+ // so fix that here.
+ // Later, when we want to find the index of a node in the variables list,
+ // we will check that n.Curfn == Curfn and that n.Opt holds an int32;
+ // that value is the index in the variables list.
+ ll.N.Opt = nil
+
+ ll.N.Curfn = Curfn
+ switch ll.N.Class {
+ case PAUTO:
+ if haspointers(ll.N.Type) {
+ ll.N.Opt = int32(len(result))
+ result = append(result, ll.N)
+ }
+
+ case PPARAM,
+ PPARAMOUT:
+ ll.N.Opt = int32(len(result))
+ result = append(result, ll.N)
+ }
+ }
+ }
+
+ return result
+}
+
+// A pretty printer for control flow graphs. Takes an array of BasicBlock*s.
+func printcfg(cfg []*BasicBlock) {
+ var bb *BasicBlock
+ var i int32
+
+ for i = 0; i < int32(len(cfg)); i++ {
+ bb = cfg[i]
+ printblock(bb)
+ }
+}
+
+// Assigns a reverse post order number to each connected basic block using the
+// standard algorithm. Unconnected blocks will not be affected.
+func reversepostorder(root *BasicBlock, rpo *int32) {
+ var bb *BasicBlock
+ var i int
+
+ root.mark = VISITED
+ for i = 0; i < len(root.succ); i++ {
+ bb = root.succ[i]
+ if bb.mark == UNVISITED {
+ reversepostorder(bb, rpo)
+ }
+ }
+
+ *rpo -= 1
+ root.rpo = int(*rpo)
+}
+
+// Comparison predicate used for sorting basic blocks by their rpo in ascending
+// order.
+type blockrpocmp []*BasicBlock
+
+func (x blockrpocmp) Len() int { return len(x) }
+func (x blockrpocmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x blockrpocmp) Less(i, j int) bool { return x[i].rpo < x[j].rpo }
+
+// A pattern matcher for call instructions. Returns true when the instruction
+// is a call to a specific package qualified function name.
+func iscall(prog *obj.Prog, name *obj.LSym) bool {
+ if prog == nil {
+ Fatal("iscall: prog is nil")
+ }
+ if name == nil {
+ Fatal("iscall: function name is nil")
+ }
+ if prog.As != obj.ACALL {
+ return false
+ }
+ return name == prog.To.Sym
+}
+
+// Returns true for instructions that call a runtime function implementing a
+// select communication clause.
+
+var isselectcommcasecall_names [5]*obj.LSym
+
+func isselectcommcasecall(prog *obj.Prog) int {
+ var i int32
+
+ if isselectcommcasecall_names[0] == nil {
+ isselectcommcasecall_names[0] = Linksym(Pkglookup("selectsend", Runtimepkg))
+ isselectcommcasecall_names[1] = Linksym(Pkglookup("selectrecv", Runtimepkg))
+ isselectcommcasecall_names[2] = Linksym(Pkglookup("selectrecv2", Runtimepkg))
+ isselectcommcasecall_names[3] = Linksym(Pkglookup("selectdefault", Runtimepkg))
+ }
+
+ for i = 0; isselectcommcasecall_names[i] != nil; i++ {
+ if iscall(prog, isselectcommcasecall_names[i]) {
+ return 1
+ }
+ }
+ return 0
+}
+
+// Returns true for call instructions that target runtime·newselect.
+
+var isnewselect_sym *obj.LSym
+
+func isnewselect(prog *obj.Prog) int {
+ if isnewselect_sym == nil {
+ isnewselect_sym = Linksym(Pkglookup("newselect", Runtimepkg))
+ }
+ return bool2int(iscall(prog, isnewselect_sym))
+}
+
+// Returns true for call instructions that target runtime·selectgo.
+
+var isselectgocall_sym *obj.LSym
+
+func isselectgocall(prog *obj.Prog) int {
+ if isselectgocall_sym == nil {
+ isselectgocall_sym = Linksym(Pkglookup("selectgo", Runtimepkg))
+ }
+ return bool2int(iscall(prog, isselectgocall_sym))
+}
+
+var isdeferreturn_sym *obj.LSym
+
+func isdeferreturn(prog *obj.Prog) int {
+ if isdeferreturn_sym == nil {
+ isdeferreturn_sym = Linksym(Pkglookup("deferreturn", Runtimepkg))
+ }
+ return bool2int(iscall(prog, isdeferreturn_sym))
+}
+
+// Walk backwards from a runtime·selectgo call up to its immediately dominating
+// runtime·newselect call. Any successor nodes of communication clause nodes
+// are implicit successors of the runtime·selectgo call node. The goal of this
+// analysis is to add these missing edges to complete the control flow graph.
+func addselectgosucc(selectgo *BasicBlock) {
+ var pred *BasicBlock
+ var succ *BasicBlock
+
+ pred = selectgo
+ for {
+ if len(pred.pred) == 0 {
+ Fatal("selectgo does not have a newselect")
+ }
+ pred = pred.pred[0]
+ if blockany(pred, isselectcommcasecall) != 0 {
+ // A select comm case block should have exactly one
+ // successor.
+ if len(pred.succ) != 1 {
+ Fatal("select comm case has too many successors")
+ }
+ succ = pred.succ[0]
+
+ // Its successor should have exactly two successors.
+ // The drop through should flow to the selectgo block
+ // and the branch should lead to the select case
+ // statements block.
+ if len(succ.succ) != 2 {
+ Fatal("select comm case successor has too many successors")
+ }
+
+ // Add the block as a successor of the selectgo block.
+ addedge(selectgo, succ)
+ }
+
+ if blockany(pred, isnewselect) != 0 {
+ // Reached the matching newselect.
+ break
+ }
+ }
+}
+
+// The entry point for the missing selectgo control flow algorithm. Takes an
+// array of BasicBlock*s containing selectgo calls.
+func fixselectgo(selectgo []*BasicBlock) {
+ var bb *BasicBlock
+ var i int32
+
+ for i = 0; i < int32(len(selectgo)); i++ {
+ bb = selectgo[i]
+ addselectgosucc(bb)
+ }
+}
+
+// Constructs a control flow graph from a sequence of instructions. This
+// procedure is complicated by various sources of implicit control flow that are
+// not accounted for using the standard cfg construction algorithm. Returns an
+// array of BasicBlock*s in control flow graph form (basic blocks ordered by
+// their RPO number).
+func newcfg(firstp *obj.Prog) []*BasicBlock {
+ var p *obj.Prog
+ var prev *obj.Prog
+ var bb *BasicBlock
+ var cfg []*BasicBlock
+ var selectgo []*BasicBlock
+ var i int32
+ var rpo int32
+
+ // Reset the opt field of each prog to nil. In the first and second
+ // passes, instructions that are labels temporarily use the opt field to
+ // point to their basic block. In the third pass, the opt field is reset
+ // to point to the predecessor of an instruction in its basic block.
+ for p = firstp; p != nil; p = p.Link {
+ p.Opt = nil
+ }
+
+ // Allocate an array to remember where we have seen selectgo calls.
+ // These blocks will be revisited to add successor control flow edges.
+ selectgo = make([]*BasicBlock, 0, 0)
+
+ // Loop through all instructions identifying branch targets
+ // and fall-throughs and allocate basic blocks.
+ cfg = make([]*BasicBlock, 0, 0)
+
+ bb = newblock(firstp)
+ cfg = append(cfg, bb)
+ for p = firstp; p != nil; p = p.Link {
+ if p.To.Type == obj.TYPE_BRANCH {
+ if p.To.U.Branch == nil {
+ Fatal("prog branch to nil")
+ }
+ if p.To.U.Branch.Opt == nil {
+ p.To.U.Branch.Opt = newblock(p.To.U.Branch)
+ cfg = append(cfg, p.To.U.Branch.Opt.(*BasicBlock))
+ }
+
+ if p.As != obj.AJMP && p.Link != nil && p.Link.Opt == nil {
+ p.Link.Opt = newblock(p.Link)
+ cfg = append(cfg, p.Link.Opt.(*BasicBlock))
+ }
+ } else if isselectcommcasecall(p) != 0 || isselectgocall(p) != 0 {
+ // Accommodate implicit selectgo control flow.
+ if p.Link.Opt == nil {
+ p.Link.Opt = newblock(p.Link)
+ cfg = append(cfg, p.Link.Opt.(*BasicBlock))
+ }
+ }
+ }
+
+ // Loop through all basic blocks maximally growing the list of
+ // contained instructions until a label is reached. Add edges
+ // for branches and fall-through instructions.
+ for i = 0; i < int32(len(cfg)); i++ {
+ bb = cfg[i]
+ for p = bb.last; p != nil; p = p.Link {
+ if p.Opt != nil && p != bb.last {
+ break
+ }
+ bb.last = p
+
+ // Stop before an unreachable RET, to avoid creating
+ // unreachable control flow nodes.
+ if p.Link != nil && p.Link.As == obj.ARET && p.Link.Mode == 1 {
+ break
+ }
+
+ // Collect basic blocks with selectgo calls.
+ if isselectgocall(p) != 0 {
+ selectgo = append(selectgo, bb)
+ }
+ }
+
+ if bb.last.To.Type == obj.TYPE_BRANCH {
+ addedge(bb, bb.last.To.U.Branch.Opt.(*BasicBlock))
+ }
+ if bb.last.Link != nil {
+ // Add a fall-through when the instruction is
+ // not an unconditional control transfer.
+ if bb.last.As != obj.AJMP && bb.last.As != obj.ARET && bb.last.As != obj.AUNDEF {
+ addedge(bb, bb.last.Link.Opt.(*BasicBlock))
+ }
+ }
+ }
+
+ // Add back links so the instructions in a basic block can be traversed
+ // backward. This is the final state of the instruction opt field.
+ for i = 0; i < int32(len(cfg)); i++ {
+ bb = cfg[i]
+ p = bb.first
+ prev = nil
+ for {
+ p.Opt = prev
+ if p == bb.last {
+ break
+ }
+ prev = p
+ p = p.Link
+ }
+ }
+
+ // Add missing successor edges to the selectgo blocks.
+ if len(selectgo) != 0 {
+ fixselectgo([]*BasicBlock(selectgo))
+ }
+
+ // Find a depth-first order and assign a depth-first number to
+ // all basic blocks.
+ for i = 0; i < int32(len(cfg)); i++ {
+ bb = cfg[i]
+ bb.mark = UNVISITED
+ }
+
+ bb = cfg[0]
+ rpo = int32(len(cfg))
+ reversepostorder(bb, &rpo)
+
+ // Sort the basic blocks by their depth first number. The
+ // array is now a depth-first spanning tree with the first
+ // node being the root.
+ sort.Sort(blockrpocmp(cfg))
+
+ bb = cfg[0]
+
+ // Unreachable control flow nodes are indicated by a -1 in the rpo
+ // field. If we see these nodes something must have gone wrong in an
+ // upstream compilation phase.
+ if bb.rpo == -1 {
+ fmt.Printf("newcfg: unreachable basic block for %v\n", bb.last)
+ printcfg(cfg)
+ Fatal("newcfg: invalid control flow graph")
+ }
+
+ return cfg
+}
+
+// Frees a control flow graph (an array of BasicBlock*s) and all of its leaf
+// data structures.
+func freecfg(cfg []*BasicBlock) {
+ var bb *BasicBlock
+ var bb0 *BasicBlock
+ var p *obj.Prog
+ var i int32
+ var n int32
+
+ n = int32(len(cfg))
+ if n > 0 {
+ bb0 = cfg[0]
+ for p = bb0.first; p != nil; p = p.Link {
+ p.Opt = nil
+ }
+
+ for i = 0; i < n; i++ {
+ bb = cfg[i]
+ freeblock(bb)
+ }
+ }
+}
+
+// Returns true if the node names a variable that is otherwise uninteresting to
+// the liveness computation.
+func isfunny(n *Node) bool {
+ return n.Sym != nil && (n.Sym.Name == ".fp" || n.Sym.Name == ".args")
+}
+
+// Computes the effects of an instruction on a set of
+// variables. The vars argument is an array of Node*s.
+//
+// The output vectors give bits for variables:
+// uevar - used by this instruction
+// varkill - killed by this instruction
+// for variables without address taken, means variable was set
+// for variables with address taken, means variable was marked dead
+// avarinit - initialized or referred to by this instruction,
+// only for variables with address taken but not escaping to heap
+//
+// The avarinit output serves as a signal that the data has been
+// initialized, because any use of a variable must come after its
+// initialization.
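+//
+// As a rough illustration (assumed amd64 instructions, not from this file):
+// for MOVQ a+0(FP), AX where a is a non-addrtaken PPARAM, the LeftRead flag
+// puts a's bit in uevar; for MOVQ AX, x-8(SP) where x is a PAUTO, the
+// RightWrite flag puts x's bit in varkill.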
+func progeffects(prog *obj.Prog, vars []*Node, uevar *Bvec, varkill *Bvec, avarinit *Bvec) {
+ var info ProgInfo
+ var from *obj.Addr
+ var to *obj.Addr
+ var node *Node
+ var i int32
+
+ bvresetall(uevar)
+ bvresetall(varkill)
+ bvresetall(avarinit)
+
+ Thearch.Proginfo(&info, prog)
+ if prog.As == obj.ARET {
+ // Return instructions implicitly read all the arguments. For
+ // the sake of correctness, out arguments must be read. For the
+ // sake of backtrace quality, we read in arguments as well.
+ //
+ // A return instruction with a p->to is a tail return, which brings
+ // the stack pointer back up (if it ever went down) and then jumps
+ // to a new function entirely. That form of instruction must read
+ // all the parameters for correctness, and similarly it must not
+ // read the out arguments - they won't be set until the new
+ // function runs.
+ for i = 0; i < int32(len(vars)); i++ {
+ node = vars[i]
+ switch node.Class &^ PHEAP {
+ case PPARAM:
+ bvset(uevar, i)
+
+ // If the result had its address taken, it is being tracked
+ // by the avarinit code, which does not use uevar.
+ // If we added it to uevar too, we'd not see any kill
+ // and decide that the variable was live on entry, which it is not.
+ // So only use uevar in the non-addrtaken case.
+ // The prog.To.Type == obj.TYPE_NONE check limits the bvset to
+ // non-tail-call return instructions; see note above
+ // the for loop for details.
+ case PPARAMOUT:
+ if !(node.Addrtaken != 0) && prog.To.Type == obj.TYPE_NONE {
+ bvset(uevar, i)
+ }
+ }
+ }
+
+ return
+ }
+
+ if prog.As == obj.ATEXT {
+ // A text instruction marks the entry point to a function and
+ // the definition point of all in arguments.
+ for i = 0; i < int32(len(vars)); i++ {
+ node = vars[i]
+ switch node.Class &^ PHEAP {
+ case PPARAM:
+ if node.Addrtaken != 0 {
+ bvset(avarinit, i)
+ }
+ bvset(varkill, i)
+ }
+ }
+
+ return
+ }
+
+ if info.Flags&(LeftRead|LeftWrite|LeftAddr) != 0 {
+ from = &prog.From
+ if from.Node != nil && from.Sym != nil && ((from.Node).(*Node)).Curfn == Curfn {
+ switch ((from.Node).(*Node)).Class &^ PHEAP {
+ case PAUTO,
+ PPARAM,
+ PPARAMOUT:
+ pos, ok := from.Node.(*Node).Opt.(int32) // index in vars
+ if !ok {
+ goto Next
+ }
+ if pos >= int32(len(vars)) || vars[pos] != from.Node {
+ Fatal("bad bookkeeping in liveness %v %d", Nconv(from.Node.(*Node), 0), pos)
+ }
+ if ((from.Node).(*Node)).Addrtaken != 0 {
+ bvset(avarinit, pos)
+ } else {
+ if info.Flags&(LeftRead|LeftAddr) != 0 {
+ bvset(uevar, pos)
+ }
+ if info.Flags&LeftWrite != 0 {
+ if from.Node != nil && !(Isfat(((from.Node).(*Node)).Type) != 0) {
+ bvset(varkill, pos)
+ }
+ }
+ }
+ }
+ }
+ }
+
+Next:
+ if info.Flags&(RightRead|RightWrite|RightAddr) != 0 {
+ to = &prog.To
+ if to.Node != nil && to.Sym != nil && ((to.Node).(*Node)).Curfn == Curfn {
+ switch ((to.Node).(*Node)).Class &^ PHEAP {
+ case PAUTO,
+ PPARAM,
+ PPARAMOUT:
+ pos, ok := to.Node.(*Node).Opt.(int32) // index in vars
+ if !ok {
+ goto Next1
+ }
+ if pos >= int32(len(vars)) || vars[pos] != to.Node {
+ Fatal("bad bookkeeping in liveness %v %d", Nconv(to.Node.(*Node), 0), pos)
+ }
+ if ((to.Node).(*Node)).Addrtaken != 0 {
+ if prog.As != obj.AVARKILL {
+ bvset(avarinit, pos)
+ }
+ if prog.As == obj.AVARDEF || prog.As == obj.AVARKILL {
+ bvset(varkill, pos)
+ }
+ } else {
+ // RightRead is a read, obviously.
+ // RightAddr by itself is also implicitly a read.
+ //
+ // RightAddr|RightWrite means that the address is being taken
+ // but only so that the instruction can write to the value.
+ // It is not a read. It is equivalent to RightWrite except that
+ // having the RightAddr bit set keeps the registerizer from
+ // trying to substitute a register for the memory location.
+ if (info.Flags&RightRead != 0) || info.Flags&(RightAddr|RightWrite) == RightAddr {
+ bvset(uevar, pos)
+ }
+ if info.Flags&RightWrite != 0 {
+ if to.Node != nil && (!(Isfat(((to.Node).(*Node)).Type) != 0) || prog.As == obj.AVARDEF) {
+ bvset(varkill, pos)
+ }
+ }
+ }
+ }
+ }
+ }
+
+Next1:
+}
+
+// Constructs a new liveness structure used to hold the global state of the
+// liveness computation. The cfg argument is an array of BasicBlock*s and the
+// vars argument is an array of Node*s.
+func newliveness(fn *Node, ptxt *obj.Prog, cfg []*BasicBlock, vars []*Node) *Liveness {
+ var result *Liveness
+ var i int32
+ var nblocks int32
+ var nvars int32
+
+ result = new(Liveness)
+ result.fn = fn
+ result.ptxt = ptxt
+ result.cfg = cfg
+ result.vars = vars
+
+ nblocks = int32(len(cfg))
+ result.uevar = make([]*Bvec, nblocks)
+ result.varkill = make([]*Bvec, nblocks)
+ result.livein = make([]*Bvec, nblocks)
+ result.liveout = make([]*Bvec, nblocks)
+ result.avarinit = make([]*Bvec, nblocks)
+ result.avarinitany = make([]*Bvec, nblocks)
+ result.avarinitall = make([]*Bvec, nblocks)
+
+ nvars = int32(len(vars))
+ for i = 0; i < nblocks; i++ {
+ result.uevar[i] = bvalloc(nvars)
+ result.varkill[i] = bvalloc(nvars)
+ result.livein[i] = bvalloc(nvars)
+ result.liveout[i] = bvalloc(nvars)
+ result.avarinit[i] = bvalloc(nvars)
+ result.avarinitany[i] = bvalloc(nvars)
+ result.avarinitall[i] = bvalloc(nvars)
+ }
+
+ result.livepointers = make([]*Bvec, 0, 0)
+ result.argslivepointers = make([]*Bvec, 0, 0)
+ return result
+}
+
+// Frees the liveness structure and all of its leaf data structures.
+func freeliveness(lv *Liveness) {
+ var i int32
+
+ if lv == nil {
+ Fatal("freeliveness: cannot free nil")
+ }
+
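+ // The empty loops below mirror the per-element frees in the C
+ // original; in Go the garbage collector reclaims the bitmaps,
+ // so there is nothing left to do here.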
+ for i = 0; i < int32(len(lv.livepointers)); i++ {
+ }
+
+ for i = 0; i < int32(len(lv.argslivepointers)); i++ {
+ }
+
+ for i = 0; i < int32(len(lv.cfg)); i++ {
+ }
+}
+
+func printeffects(p *obj.Prog, uevar *Bvec, varkill *Bvec, avarinit *Bvec) {
+ fmt.Printf("effects of %v", p)
+ fmt.Printf("\nuevar: ")
+ bvprint(uevar)
+ fmt.Printf("\nvarkill: ")
+ bvprint(varkill)
+ fmt.Printf("\navarinit: ")
+ bvprint(avarinit)
+ fmt.Printf("\n")
+}
+
+// Pretty print a variable node. Uses Pascal like conventions for pointers and
+// addresses to avoid confusing the C like conventions used in the node variable
+// names.
+func printnode(node *Node) {
+ var p string
+ var a string
+
+ p = ""
+ if haspointers(node.Type) {
+ p = "^"
+ }
+ a = ""
+ if node.Addrtaken != 0 {
+ a = "@"
+ }
+ fmt.Printf(" %v%s%s", Nconv(node, 0), p, a)
+}
+
+// Pretty print a list of variables. The vars argument is an array of Node*s.
+func printvars(name string, bv *Bvec, vars []*Node) {
+ var i int32
+
+ fmt.Printf("%s:", name)
+ for i = 0; i < int32(len(vars)); i++ {
+ if bvget(bv, i) != 0 {
+ printnode(vars[i])
+ }
+ }
+ fmt.Printf("\n")
+}
+
+// Prints a basic block annotated with the information computed by liveness
+// analysis.
+func livenessprintblock(lv *Liveness, bb *BasicBlock) {
+ var pred *BasicBlock
+ var succ *BasicBlock
+ var prog *obj.Prog
+ var live *Bvec
+ var i int
+ var pos int32
+
+ fmt.Printf("basic block %d\n", bb.rpo)
+
+ fmt.Printf("\tpred:")
+ for i = 0; i < len(bb.pred); i++ {
+ pred = bb.pred[i]
+ fmt.Printf(" %d", pred.rpo)
+ }
+
+ fmt.Printf("\n")
+
+ fmt.Printf("\tsucc:")
+ for i = 0; i < len(bb.succ); i++ {
+ succ = bb.succ[i]
+ fmt.Printf(" %d", succ.rpo)
+ }
+
+ fmt.Printf("\n")
+
+ printvars("\tuevar", lv.uevar[bb.rpo], []*Node(lv.vars))
+ printvars("\tvarkill", lv.varkill[bb.rpo], []*Node(lv.vars))
+ printvars("\tlivein", lv.livein[bb.rpo], []*Node(lv.vars))
+ printvars("\tliveout", lv.liveout[bb.rpo], []*Node(lv.vars))
+ printvars("\tavarinit", lv.avarinit[bb.rpo], []*Node(lv.vars))
+ printvars("\tavarinitany", lv.avarinitany[bb.rpo], []*Node(lv.vars))
+ printvars("\tavarinitall", lv.avarinitall[bb.rpo], []*Node(lv.vars))
+
+ fmt.Printf("\tprog:\n")
+ for prog = bb.first; ; prog = prog.Link {
+ fmt.Printf("\t\t%v", prog)
+ if prog.As == obj.APCDATA && prog.From.Offset == obj.PCDATA_StackMapIndex {
+ pos = int32(prog.To.Offset)
+ live = lv.livepointers[pos]
+ fmt.Printf(" ")
+ bvprint(live)
+ }
+
+ fmt.Printf("\n")
+ if prog == bb.last {
+ break
+ }
+ }
+}
+
+// Prints a control flow graph annotated with any information computed by
+// liveness analysis.
+func livenessprintcfg(lv *Liveness) {
+ var bb *BasicBlock
+ var i int32
+
+ for i = 0; i < int32(len(lv.cfg)); i++ {
+ bb = lv.cfg[i]
+ livenessprintblock(lv, bb)
+ }
+}
+
+func checkauto(fn *Node, p *obj.Prog, n *Node) {
+ var l *NodeList
+
+ for l = fn.Dcl; l != nil; l = l.Next {
+ if l.N.Op == ONAME && l.N.Class == PAUTO && l.N == n {
+ return
+ }
+ }
+
+ if n == nil {
+ fmt.Printf("%v: checkauto %v: nil node in %v\n", p.Line(), Nconv(Curfn, 0), p)
+ return
+ }
+
+ fmt.Printf("checkauto %v: %v (%p; class=%d) not found in %v\n", Nconv(Curfn, 0), Nconv(n, 0), n, n.Class, p)
+ for l = fn.Dcl; l != nil; l = l.Next {
+ fmt.Printf("\t%v (%p; class=%d)\n", Nconv(l.N, 0), l.N, l.N.Class)
+ }
+ Yyerror("checkauto: invariant lost")
+}
+
+func checkparam(fn *Node, p *obj.Prog, n *Node) {
+ var l *NodeList
+ var a *Node
+ var class int
+
+ if isfunny(n) {
+ return
+ }
+ for l = fn.Dcl; l != nil; l = l.Next {
+ a = l.N
+ class = int(a.Class) &^ PHEAP
+ if a.Op == ONAME && (class == PPARAM || class == PPARAMOUT) && a == n {
+ return
+ }
+ }
+
+ fmt.Printf("checkparam %v: %v (%p; class=%d) not found in %v\n", Nconv(Curfn, 0), Nconv(n, 0), n, n.Class, p)
+ for l = fn.Dcl; l != nil; l = l.Next {
+ fmt.Printf("\t%v (%p; class=%d)\n", Nconv(l.N, 0), l.N, l.N.Class)
+ }
+ Yyerror("checkparam: invariant lost")
+}
+
+func checkprog(fn *Node, p *obj.Prog) {
+ if p.From.Name == obj.NAME_AUTO {
+ checkauto(fn, p, p.From.Node.(*Node))
+ }
+ if p.From.Name == obj.NAME_PARAM {
+ checkparam(fn, p, p.From.Node.(*Node))
+ }
+ if p.To.Name == obj.NAME_AUTO {
+ checkauto(fn, p, p.To.Node.(*Node))
+ }
+ if p.To.Name == obj.NAME_PARAM {
+ checkparam(fn, p, p.To.Node.(*Node))
+ }
+}
+
+// Check instruction invariants. We assume that the nodes corresponding to the
+// sources and destinations of memory operations will be declared in the
+// function. This is not strictly true: the so-called funny nodes are an
+// exception, and there are special cases to skip over them. The analysis
+// will fail if this invariant is blindly changed.
+func checkptxt(fn *Node, firstp *obj.Prog) {
+ var p *obj.Prog
+
+ if debuglive == 0 {
+ return
+ }
+
+ for p = firstp; p != nil; p = p.Link {
+ if false {
+ fmt.Printf("analyzing '%v'\n", p)
+ }
+ if p.As != obj.ADATA && p.As != obj.AGLOBL && p.As != obj.ATYPE {
+ checkprog(fn, p)
+ }
+ }
+}
+
+// NOTE: The bitmap for a specific type t should be cached in t after the first run
+// and then simply copied into bv at the correct offset on future calls with
+// the same type t. On https://rsc.googlecode.com/hg/testdata/slow.go, twobitwalktype1
+// accounts for 40% of the 6g execution time.
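+//
+// Worked example (a sketch, assuming a 64-bit target): for
+//	struct { i int; p *byte }
+// each word gets a 2-bit pair (obj.BitsPerPointer bits): the walk below
+// sets bit 0 for word 0 (pair 01, scalar) and bit 3 for word 1
+// (pair 10, pointer).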
+func twobitwalktype1(t *Type, xoffset *int64, bv *Bvec) {
+ var fieldoffset int64
+ var i int64
+ var o int64
+ var t1 *Type
+
+ if t.Align > 0 && *xoffset&int64(t.Align-1) != 0 {
+ Fatal("twobitwalktype1: invalid initial alignment, %v", Tconv(t, 0))
+ }
+
+ switch t.Etype {
+ case TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TINT,
+ TUINT,
+ TUINTPTR,
+ TBOOL,
+ TFLOAT32,
+ TFLOAT64,
+ TCOMPLEX64,
+ TCOMPLEX128:
+ for i = 0; i < t.Width; i++ {
+ bvset(bv, int32(((*xoffset+i)/int64(Widthptr))*obj.BitsPerPointer)) // 1 = live scalar (BitsScalar)
+ }
+
+ *xoffset += t.Width
+
+ case TPTR32,
+ TPTR64,
+ TUNSAFEPTR,
+ TFUNC,
+ TCHAN,
+ TMAP:
+ if *xoffset&int64(Widthptr-1) != 0 {
+ Fatal("twobitwalktype1: invalid alignment, %v", Tconv(t, 0))
+ }
+ bvset(bv, int32((*xoffset/int64(Widthptr))*obj.BitsPerPointer+1)) // 2 = live ptr (BitsPointer)
+ *xoffset += t.Width
+
+ // struct { byte *str; intgo len; }
+ case TSTRING:
+ if *xoffset&int64(Widthptr-1) != 0 {
+ Fatal("twobitwalktype1: invalid alignment, %v", Tconv(t, 0))
+ }
+ bvset(bv, int32((*xoffset/int64(Widthptr))*obj.BitsPerPointer+1)) // 2 = live ptr in first slot (BitsPointer)
+ *xoffset += t.Width
+
+ // struct { Itab *tab; union { void *ptr, uintptr val } data; }
+ // or, when isnilinter(t)==true:
+ // struct { Type *type; union { void *ptr, uintptr val } data; }
+ case TINTER:
+ if *xoffset&int64(Widthptr-1) != 0 {
+ Fatal("twobitwalktype1: invalid alignment, %v", Tconv(t, 0))
+ }
+ bvset(bv, int32((*xoffset/int64(Widthptr))*obj.BitsPerPointer+1)) // 2 = live ptr in first slot (BitsPointer)
+ bvset(bv, int32((*xoffset/int64(Widthptr))*obj.BitsPerPointer+3)) // 2 = live ptr in second slot (BitsPointer)
+ *xoffset += t.Width
+
+ // The value of t.Bound is -1 for slice types and >0 for
+ // fixed array types. All other values are invalid.
+ case TARRAY:
+ if t.Bound < -1 {
+ Fatal("twobitwalktype1: invalid bound, %v", Tconv(t, 0))
+ }
+ if Isslice(t) != 0 {
+ // struct { byte *array; uintgo len; uintgo cap; }
+ if *xoffset&int64(Widthptr-1) != 0 {
+ Fatal("twobitwalktype1: invalid TARRAY alignment, %v", Tconv(t, 0))
+ }
+ bvset(bv, int32((*xoffset/int64(Widthptr))*obj.BitsPerPointer+1)) // 2 = live ptr in first slot (BitsPointer)
+ *xoffset += t.Width
+ } else {
+ for i = 0; i < t.Bound; i++ {
+ twobitwalktype1(t.Type, xoffset, bv)
+ }
+ }
+
+ case TSTRUCT:
+ o = 0
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ fieldoffset = t1.Width
+ *xoffset += fieldoffset - o
+ twobitwalktype1(t1.Type, xoffset, bv)
+ o = fieldoffset + t1.Type.Width
+ }
+
+ *xoffset += t.Width - o
+
+ default:
+ Fatal("twobitwalktype1: unexpected type, %v", Tconv(t, 0))
+ }
+}
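+
+// A hedged sketch of the encoding built above (wordIsPointer is
+// hypothetical, not part of this file): every pointer-sized word of a
+// value gets obj.BitsPerPointer (two) bits in bv, where bit 0 of the pair
+// marks a live scalar (BitsScalar) and bit 1 marks a live pointer
+// (BitsPointer). Testing whether the word at xoffset holds a live pointer
+// would look like:
+//
+//	func wordIsPointer(bv *Bvec, xoffset int64) bool {
+//		return bvget(bv, int32((xoffset/int64(Widthptr))*obj.BitsPerPointer+1)) != 0
+//	}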
+
+// Returns the number of words of local variables.
+func localswords() int32 {
+ return int32(stkptrsize / int64(Widthptr))
+}
+
+// Returns the number of words of in and out arguments.
+func argswords() int32 {
+ return int32(Curfn.Type.Argwid / int64(Widthptr))
+}
+
+// Generates live pointer value maps for arguments and local variables. The
+// this argument and the in arguments are always assumed live. The vars
+// argument is an array of Node*s.
+func twobitlivepointermap(lv *Liveness, liveout *Bvec, vars []*Node, args *Bvec, locals *Bvec) {
+ var node *Node
+ var thisargtype *Type
+ var inargtype *Type
+ var xoffset int64
+ var i int32
+
+ for i = 0; ; i++ {
+ i = int32(bvnext(liveout, i))
+ if !(i >= 0) {
+ break
+ }
+ node = vars[i]
+ switch node.Class {
+ case PAUTO:
+ xoffset = node.Xoffset + stkptrsize
+ twobitwalktype1(node.Type, &xoffset, locals)
+
+ case PPARAM,
+ PPARAMOUT:
+ xoffset = node.Xoffset
+ twobitwalktype1(node.Type, &xoffset, args)
+ }
+ }
+
+ // The node list only contains declared names.
+ // If the receiver or arguments are unnamed, they will be omitted
+ // from the list above. Preserve those values - even though they are unused -
+ // in order to keep their addresses live for use in stack traces.
+ thisargtype = getthisx(lv.fn.Type)
+
+ if thisargtype != nil {
+ xoffset = 0
+ twobitwalktype1(thisargtype, &xoffset, args)
+ }
+
+ inargtype = getinargx(lv.fn.Type)
+ if inargtype != nil {
+ xoffset = 0
+ twobitwalktype1(inargtype, &xoffset, args)
+ }
+}
+
+// Construct a disembodied instruction.
+func unlinkedprog(as int) *obj.Prog {
+ var p *obj.Prog
+
+ p = Ctxt.NewProg()
+ Clearp(p)
+ p.As = int16(as)
+ return p
+}
+
+// Construct a new PCDATA instruction associated with, and for the purpose
+// of covering, an existing instruction.
+func newpcdataprog(prog *obj.Prog, index int32) *obj.Prog {
+ var from Node
+ var to Node
+ var pcdata *obj.Prog
+
+ Nodconst(&from, Types[TINT32], obj.PCDATA_StackMapIndex)
+ Nodconst(&to, Types[TINT32], int64(index))
+ pcdata = unlinkedprog(obj.APCDATA)
+ pcdata.Lineno = prog.Lineno
+ Naddr(&from, &pcdata.From, 0)
+ Naddr(&to, &pcdata.To, 0)
+ return pcdata
+}
+
+// Returns true for instructions that are safe points that must be annotated
+// with liveness information.
+func issafepoint(prog *obj.Prog) int {
+ return bool2int(prog.As == obj.ATEXT || prog.As == obj.ACALL)
+}
+
+// Initializes the sets for solving the live variables. Visits all the
+// instructions in each basic block to summarize the information at each
+// basic block.
+func livenessprologue(lv *Liveness) {
+ var bb *BasicBlock
+ var uevar *Bvec
+ var varkill *Bvec
+ var avarinit *Bvec
+ var p *obj.Prog
+ var i int32
+ var nvars int32
+
+ nvars = int32(len(lv.vars))
+ uevar = bvalloc(nvars)
+ varkill = bvalloc(nvars)
+ avarinit = bvalloc(nvars)
+ for i = 0; i < int32(len(lv.cfg)); i++ {
+ bb = lv.cfg[i]
+
+ // Walk the block instructions backward and update the block
+ // effects with each prog's effects.
+ for p = bb.last; p != nil; p = p.Opt.(*obj.Prog) {
+ progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+ if debuglive >= 3 {
+ printeffects(p, uevar, varkill, avarinit)
+ }
+ bvor(lv.varkill[i], lv.varkill[i], varkill)
+ bvandnot(lv.uevar[i], lv.uevar[i], varkill)
+ bvor(lv.uevar[i], lv.uevar[i], uevar)
+ }
+
+ // Walk the block instructions forward to update avarinit bits.
+ // avarinit describes the effect at the end of the block, not the beginning.
+ bvresetall(varkill)
+
+ for p = bb.first; ; p = p.Link {
+ progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+ if debuglive >= 3 {
+ printeffects(p, uevar, varkill, avarinit)
+ }
+ bvandnot(lv.avarinit[i], lv.avarinit[i], varkill)
+ bvor(lv.avarinit[i], lv.avarinit[i], avarinit)
+ if p == bb.last {
+ break
+ }
+ }
+ }
+}
+
+// Solve the liveness dataflow equations.
+func livenesssolve(lv *Liveness) {
+ var bb *BasicBlock
+ var succ *BasicBlock
+ var pred *BasicBlock
+ var newlivein *Bvec
+ var newliveout *Bvec
+ var any *Bvec
+ var all *Bvec
+ var rpo int32
+ var i int32
+ var j int32
+ var change int32
+
+ // These temporary bitvectors exist to avoid successive allocations and
+ // frees within the loop.
+ newlivein = bvalloc(int32(len(lv.vars)))
+
+ newliveout = bvalloc(int32(len(lv.vars)))
+ any = bvalloc(int32(len(lv.vars)))
+ all = bvalloc(int32(len(lv.vars)))
+
+ // Push avarinitall, avarinitany forward.
+ // avarinitall says the addressed var is initialized along all paths reaching the block exit.
+ // avarinitany says the addressed var is initialized along some path reaching the block exit.
+ for i = 0; i < int32(len(lv.cfg)); i++ {
+ bb = lv.cfg[i]
+ rpo = int32(bb.rpo)
+ if i == 0 {
+ bvcopy(lv.avarinitall[rpo], lv.avarinit[rpo])
+ } else {
+ bvresetall(lv.avarinitall[rpo])
+ bvnot(lv.avarinitall[rpo])
+ }
+
+ bvcopy(lv.avarinitany[rpo], lv.avarinit[rpo])
+ }
+
+ change = 1
+ for change != 0 {
+ change = 0
+ for i = 0; i < int32(len(lv.cfg)); i++ {
+ bb = lv.cfg[i]
+ rpo = int32(bb.rpo)
+ bvresetall(any)
+ bvresetall(all)
+ for j = 0; j < int32(len(bb.pred)); j++ {
+ pred = bb.pred[j]
+ if j == 0 {
+ bvcopy(any, lv.avarinitany[pred.rpo])
+ bvcopy(all, lv.avarinitall[pred.rpo])
+ } else {
+ bvor(any, any, lv.avarinitany[pred.rpo])
+ bvand(all, all, lv.avarinitall[pred.rpo])
+ }
+ }
+
+ bvandnot(any, any, lv.varkill[rpo])
+ bvandnot(all, all, lv.varkill[rpo])
+ bvor(any, any, lv.avarinit[rpo])
+ bvor(all, all, lv.avarinit[rpo])
+ if bvcmp(any, lv.avarinitany[rpo]) != 0 {
+ change = 1
+ bvcopy(lv.avarinitany[rpo], any)
+ }
+
+ if bvcmp(all, lv.avarinitall[rpo]) != 0 {
+ change = 1
+ bvcopy(lv.avarinitall[rpo], all)
+ }
+ }
+ }
+
+ // Iterate through the blocks in reverse round-robin fashion. A work
+ // queue might be slightly faster. As is, the number of iterations is
+ // so low that it hardly seems to be worth the complexity.
+ change = 1
+
+ for change != 0 {
+ change = 0
+
+ // Walk blocks in the general direction of propagation. This
+ // improves convergence.
+ for i = int32(len(lv.cfg)) - 1; i >= 0; i-- {
+ // A variable is live on output from this block
+ // if it is live on input to some successor.
+ //
+ // out[b] = \bigcup_{s \in succ[b]} in[s]
+ bb = lv.cfg[i]
+
+ rpo = int32(bb.rpo)
+ bvresetall(newliveout)
+ for j = 0; j < int32(len(bb.succ)); j++ {
+ succ = bb.succ[j]
+ bvor(newliveout, newliveout, lv.livein[succ.rpo])
+ }
+
+ if bvcmp(lv.liveout[rpo], newliveout) != 0 {
+ change = 1
+ bvcopy(lv.liveout[rpo], newliveout)
+ }
+
+ // A variable is live on input to this block
+ // if it is live on output from this block and
+ // not set by the code in this block.
+ //
+ // in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
+ bvandnot(newlivein, lv.liveout[rpo], lv.varkill[rpo])
+
+ bvor(lv.livein[rpo], newlivein, lv.uevar[rpo])
+ }
+ }
+}
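+
+// Worked example of the equations above (illustrative): take a two-block
+// loop B0 -> B1 -> B0 where B0 reads x (uevar[B0] = {x}) and B1 writes x
+// (varkill[B1] = {x}). The backward sweep converges to
+//
+//	in[B0]  = {x} \cup out[B0] = {x}      // the read makes x live into B0
+//	out[B1] = in[B0] = {x}                // x is live across the back edge
+//	in[B1]  = out[B1] \setminus {x} = {}  // the write in B1 kills x
+//	out[B0] = in[B1] = {}
+//
+// so x is live only along the edge from B1 back to B0.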
+
+// This function is slow but it is only used for generating debug prints.
+// Check whether n is marked live in args/locals.
+func islive(n *Node, args *Bvec, locals *Bvec) int {
+ var i int
+
+ switch n.Class {
+ case PPARAM,
+ PPARAMOUT:
+ for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
+ if bvget(args, int32(n.Xoffset/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
+ return 1
+ }
+ }
+
+ case PAUTO:
+ for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
+ if bvget(locals, int32((n.Xoffset+stkptrsize)/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+// Visits all instructions in a basic block and computes a bit vector of live
+// variables at each safe point location.
+func livenessepilogue(lv *Liveness) {
+ var bb *BasicBlock
+ var pred *BasicBlock
+ var ambig *Bvec
+ var livein *Bvec
+ var liveout *Bvec
+ var uevar *Bvec
+ var varkill *Bvec
+ var args *Bvec
+ var locals *Bvec
+ var avarinit *Bvec
+ var any *Bvec
+ var all *Bvec
+ var n *Node
+ var p *obj.Prog
+ var next *obj.Prog
+ var i int32
+ var j int32
+ var numlive int32
+ var startmsg int32
+ var nmsg int32
+ var nvars int32
+ var pos int32
+ var xoffset int64
+ var msg []string
+ var fmt_ string
+
+ nvars = int32(len(lv.vars))
+ livein = bvalloc(nvars)
+ liveout = bvalloc(nvars)
+ uevar = bvalloc(nvars)
+ varkill = bvalloc(nvars)
+ avarinit = bvalloc(nvars)
+ any = bvalloc(nvars)
+ all = bvalloc(nvars)
+ ambig = bvalloc(localswords() * obj.BitsPerPointer)
+ msg = nil
+ nmsg = 0
+ startmsg = 0
+
+ for i = 0; i < int32(len(lv.cfg)); i++ {
+ bb = lv.cfg[i]
+
+ // Compute avarinitany and avarinitall for entry to block.
+ // This duplicates information known during livenesssolve
+ // but avoids storing two more vectors for each block.
+ bvresetall(any)
+
+ bvresetall(all)
+ for j = 0; j < int32(len(bb.pred)); j++ {
+ pred = bb.pred[j]
+ if j == 0 {
+ bvcopy(any, lv.avarinitany[pred.rpo])
+ bvcopy(all, lv.avarinitall[pred.rpo])
+ } else {
+ bvor(any, any, lv.avarinitany[pred.rpo])
+ bvand(all, all, lv.avarinitall[pred.rpo])
+ }
+ }
+
+ // Walk forward through the basic block instructions and
+ // allocate liveness maps for those instructions that need them.
+ // Seed the maps with information about the addrtaken variables.
+ for p = bb.first; ; p = p.Link {
+ progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+ bvandnot(any, any, varkill)
+ bvandnot(all, all, varkill)
+ bvor(any, any, avarinit)
+ bvor(all, all, avarinit)
+
+ if issafepoint(p) != 0 {
+ // Annotate ambiguously live variables so that they can
+ // be zeroed at function entry.
+ // livein and liveout are dead here and used as temporaries.
+ bvresetall(livein)
+
+ bvandnot(liveout, any, all)
+ if !(bvisempty(liveout) != 0) {
+ for pos = 0; pos < liveout.n; pos++ {
+ if !(bvget(liveout, pos) != 0) {
+ continue
+ }
+ bvset(all, pos) // silence future warnings in this block
+ n = lv.vars[pos]
+ if !(n.Needzero != 0) {
+ n.Needzero = 1
+ if debuglive >= 1 {
+ Warnl(int(p.Lineno), "%v: %v is ambiguously live", Nconv(Curfn.Nname, 0), Nconv(n, obj.FmtLong))
+ }
+
+ // Record in 'ambiguous' bitmap.
+ xoffset = n.Xoffset + stkptrsize
+
+ twobitwalktype1(n.Type, &xoffset, ambig)
+ }
+ }
+ }
+
+ // Allocate a bit vector for each class and facet of
+ // value we are tracking.
+
+ // Live stuff first.
+ args = bvalloc(argswords() * obj.BitsPerPointer)
+
+ lv.argslivepointers = append(lv.argslivepointers, args)
+ locals = bvalloc(localswords() * obj.BitsPerPointer)
+ lv.livepointers = append(lv.livepointers, locals)
+
+ if debuglive >= 3 {
+ fmt.Printf("%v\n", p)
+ printvars("avarinitany", any, lv.vars)
+ }
+
+ // Record any values with an "address taken" reaching
+ // this code position as live. Must do now instead of below
+ // because the any/all calculation requires walking forward
+ // over the block (as this loop does), while the liveout
+ // requires walking backward (as the next loop does).
+ twobitlivepointermap(lv, any, lv.vars, args, locals)
+ }
+
+ if p == bb.last {
+ break
+ }
+ }
+
+ bb.lastbitmapindex = len(lv.livepointers) - 1
+ }
+
+ for i = 0; i < int32(len(lv.cfg)); i++ {
+ bb = lv.cfg[i]
+
+ if debuglive >= 1 && Curfn.Nname.Sym.Name != "init" && Curfn.Nname.Sym.Name[0] != '.' {
+ nmsg = int32(len(lv.livepointers))
+ startmsg = nmsg
+ msg = make([]string, nmsg)
+ for j = 0; j < nmsg; j++ {
+ msg[j] = ""
+ }
+ }
+
+ // walk backward, emit pcdata and populate the maps
+ pos = int32(bb.lastbitmapindex)
+
+ if pos < 0 {
+ // the first block we encounter should have the ATEXT so
+ // at no point should pos ever be less than zero.
+ Fatal("livenessepilogue")
+ }
+
+ bvcopy(livein, lv.liveout[bb.rpo])
+ for p = bb.last; p != nil; p = next {
+ next = p.Opt.(*obj.Prog) // splicebefore modifies p->opt
+
+ // Propagate liveness information
+ progeffects(p, lv.vars, uevar, varkill, avarinit)
+
+ bvcopy(liveout, livein)
+ bvandnot(livein, liveout, varkill)
+ bvor(livein, livein, uevar)
+ if debuglive >= 3 && issafepoint(p) != 0 {
+ fmt.Printf("%v\n", p)
+ printvars("uevar", uevar, lv.vars)
+ printvars("varkill", varkill, lv.vars)
+ printvars("livein", livein, lv.vars)
+ printvars("liveout", liveout, lv.vars)
+ }
+
+ if issafepoint(p) != 0 {
+ // Found an interesting instruction, record the
+ // corresponding liveness information.
+
+ // Useful sanity check: on entry to the function,
+ // the only things that can possibly be live are the
+ // input parameters.
+ if p.As == obj.ATEXT {
+ for j = 0; j < liveout.n; j++ {
+ if !(bvget(liveout, j) != 0) {
+ continue
+ }
+ n = lv.vars[j]
+ if n.Class != PPARAM {
+ yyerrorl(int(p.Lineno), "internal error: %v %v recorded as live on entry", Nconv(Curfn.Nname, 0), Nconv(n, obj.FmtLong))
+ }
+ }
+ }
+
+ // Record live pointers.
+ args = lv.argslivepointers[pos]
+
+ locals = lv.livepointers[pos]
+ twobitlivepointermap(lv, liveout, lv.vars, args, locals)
+
+ // Ambiguously live variables are zeroed immediately after
+ // function entry. Mark them live for all the non-entry bitmaps
+ // so that GODEBUG=gcdead=1 mode does not poison them.
+ if p.As == obj.ACALL {
+ bvor(locals, locals, ambig)
+ }
+
+ // Show live pointer bitmaps.
+ // We're interpreting the args and locals bitmaps instead of liveout
+ // so that we include the bits added by the avarinit logic in the
+ // previous loop.
+ if msg != nil {
+ fmt_ = ""
+ fmt_ += fmt.Sprintf("%v: live at ", p.Line())
+ if p.As == obj.ACALL && p.To.Node != nil {
+ fmt_ += fmt.Sprintf("call to %s:", ((p.To.Node).(*Node)).Sym.Name)
+ } else if p.As == obj.ACALL {
+ fmt_ += fmt.Sprintf("indirect call:")
+ } else {
+ fmt_ += fmt.Sprintf("entry to %s:", ((p.From.Node).(*Node)).Sym.Name)
+ }
+ numlive = 0
+ for j = 0; j < int32(len(lv.vars)); j++ {
+ n = lv.vars[j]
+ if islive(n, args, locals) != 0 {
+ fmt_ += fmt.Sprintf(" %v", Nconv(n, 0))
+ numlive++
+ }
+ }
+
+ fmt_ += fmt.Sprintf("\n")
+ if numlive != 0 { // numlive == 0 means squelch the message
+ startmsg--
+ msg[startmsg] = fmt_
+ }
+ }
+
+ // Only CALL instructions need a PCDATA annotation.
+ // The TEXT instruction annotation is implicit.
+ if p.As == obj.ACALL {
+ if isdeferreturn(p) != 0 {
+ // runtime.deferreturn modifies its return address to return
+ // back to the CALL, not to the subsequent instruction.
+ // Because the return comes back one instruction early,
+ // the PCDATA must begin one instruction early too.
+ // The instruction before a call to deferreturn is always a
+ // no-op, to keep PC-specific data unambiguous.
+ splicebefore(lv, bb, newpcdataprog(p.Opt.(*obj.Prog), pos), p.Opt.(*obj.Prog))
+ } else {
+ splicebefore(lv, bb, newpcdataprog(p, pos), p)
+ }
+ }
+
+ pos--
+ }
+ }
+
+ if msg != nil {
+ for j = startmsg; j < nmsg; j++ {
+ if msg[j] != "" {
+ fmt.Printf("%s", msg[j])
+ }
+ }
+
+ msg = nil
+ nmsg = 0
+ startmsg = 0
+ }
+ }
+
+ Flusherrors()
+}
+
+// FNV-1 hash function constants: H0 is the 32-bit offset basis and Hp is
+// the 32-bit FNV prime.
+const (
+ H0 = 2166136261
+ Hp = 16777619
+)
+
+func hashbitmap(h uint32, bv *Bvec) uint32 {
+ var i int
+ var n int
+ var w uint32
+
+ n = int((bv.n + 31) / 32)
+ for i = 0; i < n; i++ {
+ w = bv.b[i]
+ h = (h * Hp) ^ (w & 0xff)
+ h = (h * Hp) ^ ((w >> 8) & 0xff)
+ h = (h * Hp) ^ ((w >> 16) & 0xff)
+ h = (h * Hp) ^ ((w >> 24) & 0xff)
+ }
+
+ return h
+}
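+
+// hashbitmap applies FNV-1 (h = h*prime ^ byte) to the bitmap words one
+// byte at a time, least significant byte first. A hedged equivalent over
+// a plain byte slice, for comparison (fnv1 is illustrative only):
+//
+//	func fnv1(h uint32, data []byte) uint32 {
+//		for _, b := range data {
+//			h = (h * Hp) ^ uint32(b)
+//		}
+//		return h
+//	}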
+
+// Compact liveness information by coalescing identical per-call-site bitmaps.
+// The merging only happens for a single function, not across the entire binary.
+//
+// There are actually two lists of bitmaps, one list for the local variables and one
+// list for the function arguments. Both lists are indexed by the same PCDATA
+// index, so the corresponding pairs must be considered together when
+// merging duplicates. The argument bitmaps change much less often during
+// function execution than the local variable bitmaps, so it is possible that
+// we could introduce a separate PCDATA index for arguments vs locals and
+// then compact the set of argument bitmaps separately from the set of
+// local variable bitmaps. As of 2014-04-02, doing this to the godoc binary
+// is actually a net loss: we save about 50k of argument bitmaps but the new
+// PCDATA tables cost about 100k. So for now we keep using a single index for
+// both bitmap lists.
+func livenesscompact(lv *Liveness) {
+ var table []int
+ var remap []int
+ var i int
+ var j int
+ var n int
+ var tablesize int
+ var uniq int
+ var h uint32
+ var local *Bvec
+ var arg *Bvec
+ var jlocal *Bvec
+ var jarg *Bvec
+ var p *obj.Prog
+
+ // Linear probing hash table of bitmaps seen so far.
+ // The hash table has 4n entries to keep the linear
+ // scan short. An entry of -1 indicates an empty slot.
+ n = len(lv.livepointers)
+
+ tablesize = 4 * n
+ table = make([]int, tablesize)
+ for i := range table {
+ table[i] = -1
+ }
+
+ // remap[i] = the new index of the old bit vector #i.
+ remap = make([]int, n)
+
+ for i := range remap {
+ remap[i] = -1
+ }
+ uniq = 0 // unique tables found so far
+
+ // Consider bit vectors in turn.
+ // If new, assign next number using uniq,
+ // record in remap, record in lv->livepointers and lv->argslivepointers
+ // under the new index, and add entry to hash table.
+ // If already seen, record earlier index in remap and drop the duplicate bitmaps.
+ for i = 0; i < n; i++ {
+ local = lv.livepointers[i]
+ arg = lv.argslivepointers[i]
+ h = hashbitmap(hashbitmap(H0, local), arg) % uint32(tablesize)
+
+ for {
+ j = table[h]
+ if j < 0 {
+ break
+ }
+ jlocal = lv.livepointers[j]
+ jarg = lv.argslivepointers[j]
+ if bvcmp(local, jlocal) == 0 && bvcmp(arg, jarg) == 0 {
+ remap[i] = j
+ goto Next
+ }
+
+ h++
+ if h == uint32(tablesize) {
+ h = 0
+ }
+ }
+
+ table[h] = uniq
+ remap[i] = uniq
+ lv.livepointers[uniq] = local
+ lv.argslivepointers[uniq] = arg
+ uniq++
+ Next:
+ }
+
+ // We've already reordered lv->livepointers[0:uniq]
+ // and lv->argslivepointers[0:uniq] and no longer need the
+ // bitmaps after that point. Clear the pointers later in the
+ // array so that we can tell where the coalesced bitmaps stop
+ // and so that the stale entries can be collected.
+ for j = uniq; j < n; j++ {
+ lv.livepointers[j] = nil
+ lv.argslivepointers[j] = nil
+ }
+
+ // Rewrite PCDATA instructions to use new numbering.
+ for p = lv.ptxt; p != nil; p = p.Link {
+ if p.As == obj.APCDATA && p.From.Offset == obj.PCDATA_StackMapIndex {
+ i = int(p.To.Offset)
+ if i >= 0 {
+ p.To.Offset = int64(remap[i])
+ }
+ }
+ }
+}
+
+func printbitset(printed int, name string, vars []*Node, bits *Bvec) int {
+ var i int
+ var started int
+ var n *Node
+
+ started = 0
+ for i = 0; i < len(vars); i++ {
+ if !(bvget(bits, int32(i)) != 0) {
+ continue
+ }
+ if !(started != 0) {
+ if !(printed != 0) {
+ fmt.Printf("\t")
+ } else {
+ fmt.Printf(" ")
+ }
+ started = 1
+ printed = 1
+ fmt.Printf("%s=", name)
+ } else {
+ fmt.Printf(",")
+ }
+
+ n = vars[i]
+ fmt.Printf("%s", n.Sym.Name)
+ }
+
+ return printed
+}
+
+// Prints the computed liveness information and inputs, for debugging.
+// This format synthesizes the information used during the multiple passes
+// into a single presentation.
+func livenessprintdebug(lv *Liveness) {
+ var i int
+ var j int
+ var pcdata int
+ var printed int
+ var bb *BasicBlock
+ var p *obj.Prog
+ var uevar *Bvec
+ var varkill *Bvec
+ var avarinit *Bvec
+ var args *Bvec
+ var locals *Bvec
+ var n *Node
+
+ fmt.Printf("liveness: %s\n", Curfn.Nname.Sym.Name)
+
+ uevar = bvalloc(int32(len(lv.vars)))
+ varkill = bvalloc(int32(len(lv.vars)))
+ avarinit = bvalloc(int32(len(lv.vars)))
+
+ pcdata = 0
+ for i = 0; i < len(lv.cfg); i++ {
+ if i > 0 {
+ fmt.Printf("\n")
+ }
+ bb = lv.cfg[i]
+
+ // bb#0 pred=1,2 succ=3,4
+ fmt.Printf("bb#%d pred=", i)
+
+ for j = 0; j < len(bb.pred); j++ {
+ if j > 0 {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%d", (bb.pred[j]).rpo)
+ }
+
+ fmt.Printf(" succ=")
+ for j = 0; j < len(bb.succ); j++ {
+ if j > 0 {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%d", (bb.succ[j]).rpo)
+ }
+
+ fmt.Printf("\n")
+
+ // initial settings
+ printed = 0
+
+ printed = printbitset(printed, "uevar", lv.vars, lv.uevar[bb.rpo])
+ printed = printbitset(printed, "livein", lv.vars, lv.livein[bb.rpo])
+ if printed != 0 {
+ fmt.Printf("\n")
+ }
+
+ // program listing, with individual effects listed
+ for p = bb.first; ; p = p.Link {
+ fmt.Printf("%v\n", p)
+ if p.As == obj.APCDATA && p.From.Offset == obj.PCDATA_StackMapIndex {
+ pcdata = int(p.To.Offset)
+ }
+ progeffects(p, lv.vars, uevar, varkill, avarinit)
+ printed = 0
+ printed = printbitset(printed, "uevar", lv.vars, uevar)
+ printed = printbitset(printed, "varkill", lv.vars, varkill)
+ printed = printbitset(printed, "avarinit", lv.vars, avarinit)
+ if printed != 0 {
+ fmt.Printf("\n")
+ }
+ if issafepoint(p) != 0 {
+ args = lv.argslivepointers[pcdata]
+ locals = lv.livepointers[pcdata]
+ fmt.Printf("\tlive=")
+ printed = 0
+ for j = 0; j < len(lv.vars); j++ {
+ n = lv.vars[j]
+ if islive(n, args, locals) != 0 {
+ tmp9 := printed
+ printed++
+ if tmp9 != 0 {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%v", Nconv(n, 0))
+ }
+ }
+
+ fmt.Printf("\n")
+ }
+
+ if p == bb.last {
+ break
+ }
+ }
+
+ // bb bitsets
+ fmt.Printf("end\n")
+
+ printed = printbitset(printed, "varkill", lv.vars, lv.varkill[bb.rpo])
+ printed = printbitset(printed, "liveout", lv.vars, lv.liveout[bb.rpo])
+ printed = printbitset(printed, "avarinit", lv.vars, lv.avarinit[bb.rpo])
+ printed = printbitset(printed, "avarinitany", lv.vars, lv.avarinitany[bb.rpo])
+ printed = printbitset(printed, "avarinitall", lv.vars, lv.avarinitall[bb.rpo])
+ if printed != 0 {
+ fmt.Printf("\n")
+ }
+ }
+
+ fmt.Printf("\n")
+}
+
+// Dumps an array of bitmaps to a symbol as a sequence of uint32 values. The
+// first word dumped is the total number of bitmaps. The second word is the
+// length of the bitmaps. All bitmaps are assumed to be of equal length. The
+// words that follow are the raw bitmap words. The arr argument is an
+// array of Bvec*s.
+func twobitwritesymbol(arr []*Bvec, sym *Sym) {
+ var bv *Bvec
+ var off int
+ var i int
+ var j int
+ var n int
+ var word uint32
+
+ n = len(arr)
+ off = 0
+ off += 4 // number of bitmaps, to fill in later
+ bv = arr[0]
+ off = duint32(sym, off, uint32(bv.n)) // number of bits in each bitmap
+ for i = 0; i < n; i++ {
+ // bitmap words
+ bv = arr[i]
+
+ if bv == nil {
+ break
+ }
+ for j = 0; int32(j) < bv.n; j += 32 {
+ word = bv.b[j/32]
+
+ // Runtime reads the bitmaps as byte arrays. Oblige.
+ off = duint8(sym, off, uint8(word))
+
+ off = duint8(sym, off, uint8(word>>8))
+ off = duint8(sym, off, uint8(word>>16))
+ off = duint8(sym, off, uint8(word>>24))
+ }
+ }
+
+ duint32(sym, 0, uint32(i)) // number of bitmaps
+ ggloblsym(sym, int32(off), obj.RODATA)
+}
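+
+// A hedged sketch of reading the layout back on a little-endian target
+// (readbitmaps is hypothetical; the runtime's real reader differs): the
+// symbol holds a uint32 bitmap count, a uint32 bit length, then each
+// bitmap as (nbit+31)/32 words serialized least-significant byte first.
+// Assuming encoding/binary:
+//
+//	func readbitmaps(data []byte) (nbitmap, nbit uint32, raw []byte) {
+//		nbitmap = binary.LittleEndian.Uint32(data[0:4])
+//		nbit = binary.LittleEndian.Uint32(data[4:8])
+//		return nbitmap, nbit, data[8:]
+//	}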
+
+func printprog(p *obj.Prog) {
+ for p != nil {
+ fmt.Printf("%v\n", p)
+ p = p.Link
+ }
+}
+
+// Entry point for liveness analysis. Constructs a complete CFG, solves for
+// the liveness of pointer variables in the function, and emits a runtime data
+// structure read by the garbage collector.
+func liveness(fn *Node, firstp *obj.Prog, argssym *Sym, livesym *Sym) {
+ var cfg []*BasicBlock
+ var vars []*Node
+ var lv *Liveness
+ var debugdelta int
+ var l *NodeList
+
+ // Change name to dump debugging information only for a specific function.
+ debugdelta = 0
+
+ if Curfn.Nname.Sym.Name == "!" {
+ debugdelta = 2
+ }
+
+ debuglive += debugdelta
+ if debuglive >= 3 {
+ fmt.Printf("liveness: %s\n", Curfn.Nname.Sym.Name)
+ printprog(firstp)
+ }
+
+ checkptxt(fn, firstp)
+
+ // Construct the global liveness state.
+ cfg = newcfg(firstp)
+
+ if debuglive >= 3 {
+ printcfg([]*BasicBlock(cfg))
+ }
+ vars = getvariables(fn)
+ lv = newliveness(fn, firstp, cfg, vars)
+
+ // Run the dataflow framework.
+ livenessprologue(lv)
+
+ if debuglive >= 3 {
+ livenessprintcfg(lv)
+ }
+ livenesssolve(lv)
+ if debuglive >= 3 {
+ livenessprintcfg(lv)
+ }
+ livenessepilogue(lv)
+ if debuglive >= 3 {
+ livenessprintcfg(lv)
+ }
+ livenesscompact(lv)
+
+ if debuglive >= 2 {
+ livenessprintdebug(lv)
+ }
+
+ // Emit the live pointer map data structures
+ twobitwritesymbol(lv.livepointers, livesym)
+
+ twobitwritesymbol(lv.argslivepointers, argssym)
+
+ // Free everything.
+ for l = fn.Dcl; l != nil; l = l.Next {
+ if l.N != nil {
+ l.N.Opt = nil
+ }
+ }
+ freeliveness(lv)
+
+ freecfg([]*BasicBlock(cfg))
+
+ debuglive -= debugdelta
+}
diff --git a/src/cmd/internal/gc/popt.go b/src/cmd/internal/gc/popt.go
new file mode 100644
index 0000000000..6d6912008c
--- /dev/null
+++ b/src/cmd/internal/gc/popt.go
@@ -0,0 +1,1283 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// "Portable" optimizations.
+// Compiled separately for 5g, 6g, and 8g, so allowed to use gg.h, opt.h.
+// Must code to the intersection of the three back ends.
+
+// Derived from Inferno utils/6c/gc.h
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/gc.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+const (
+ CLOAD = 5
+ CREF = 5
+ CINF = 1000
+ LOOP = 3
+)
+
+type Reg struct {
+ set Bits
+ use1 Bits
+ use2 Bits
+ refbehind Bits
+ refahead Bits
+ calbehind Bits
+ calahead Bits
+ regdiff Bits
+ act Bits
+ regu uint64
+}
+
+type Rgn struct {
+ enter *Flow
+ cost int16
+ varno int16
+ regno int16
+}
+
+var Z *Node
+
+// A Reg is a wrapper around a single Prog (one instruction) that holds
+// register optimization information while the optimizer runs.
+// r->prog is the instruction.
+
+var R *Reg
+
+const (
+ NRGN = 600
+)
+
+// A Rgn represents a single regopt variable over a region of code
+// where a register could potentially be dedicated to that variable.
+// The code encompassed by a Rgn is defined by the flow graph,
+// starting at enter, flood-filling forward while varno is refahead
+// and backward while varno is refbehind, and following branches. A
+// single variable may be represented by multiple disjoint Rgns and
+// each Rgn may choose a different register for that variable.
+// Registers are allocated to regions greedily in order of descending
+// cost.
+
+var zreg Reg
+
+var region [NRGN]Rgn
+
+var rgp *Rgn
+
+var nregion int
+
+var nvar int
+
+var regbits uint64
+
+var externs Bits
+
+var params Bits
+
+var consts Bits
+
+var addrs Bits
+
+var ivar Bits
+
+var ovar Bits
+
+var change int
+
+var maxnr int32
+
+type OptStats struct {
+ Ncvtreg int32
+ Nspill int32
+ Nreload int32
+ Ndelmov int32
+ Nvar int32
+ Naddr int32
+}
+
+var Ostats OptStats
+
+/*
+ * reg.c
+ */
+
+/*
+ * peep.c
+void peep(Prog*);
+void excise(Flow*);
+int copyu(Prog*, Adr*, Adr*);
+*/
+
+/*
+ * prog.c
+
+void proginfo(ProgInfo*, Prog*);
+*/
+// p is a call instruction. Does the call fail to return?
+
+var noreturn_symlist [10]*Sym
+
+func Noreturn(p *obj.Prog) int {
+ var s *Sym
+ var i int
+
+ if noreturn_symlist[0] == nil {
+ noreturn_symlist[0] = Pkglookup("panicindex", Runtimepkg)
+ noreturn_symlist[1] = Pkglookup("panicslice", Runtimepkg)
+ noreturn_symlist[2] = Pkglookup("throwinit", Runtimepkg)
+ noreturn_symlist[3] = Pkglookup("gopanic", Runtimepkg)
+ noreturn_symlist[4] = Pkglookup("panicwrap", Runtimepkg)
+ noreturn_symlist[5] = Pkglookup("throwreturn", Runtimepkg)
+ noreturn_symlist[6] = Pkglookup("selectgo", Runtimepkg)
+ noreturn_symlist[7] = Pkglookup("block", Runtimepkg)
+ }
+
+ if p.To.Node == nil {
+ return 0
+ }
+ s = ((p.To.Node).(*Node)).Sym
+ if s == nil {
+ return 0
+ }
+ for i = 0; noreturn_symlist[i] != nil; i++ {
+ if s == noreturn_symlist[i] {
+ return 1
+ }
+ }
+ return 0
+}
+
+// JMP chasing and removal.
+//
+// The code generator depends on being able to write out jump
+// instructions that it can jump to now but fill in later.
+// The linker will resolve them nicely, but they make the code
+// longer and more difficult to follow during debugging.
+// Remove them.
+
+/* what instruction does a JMP to p eventually land on? */
+func chasejmp(p *obj.Prog, jmploop *int) *obj.Prog {
+ var n int
+
+ n = 0
+ for p != nil && p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH {
+ n++
+ if n > 10 {
+ *jmploop = 1
+ break
+ }
+
+ p = p.To.U.Branch
+ }
+
+ return p
+}
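+
+// For example (illustrative pseudo-assembly): given
+//
+//	JMP L1
+//	...
+//	L1: JMP L2
+//	...
+//	L2: ADDQ AX, BX
+//
+// chasejmp resolves the first branch directly to the ADDQ. The 10-hop cap
+// sets *jmploop when the chain appears to be a JMP cycle, which later
+// disables the pass that elides a JMP to the next instruction.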
+
+/*
+ * reuse the Prog Opt field for mark/sweep state.
+ * leave Opt == nil at end because alive == nil.
+ */
+var alive interface{} = nil
+var dead interface{} = 1
+
+/* mark all code reachable from firstp as alive */
+func mark(firstp *obj.Prog) {
+ var p *obj.Prog
+
+ for p = firstp; p != nil; p = p.Link {
+ if p.Opt != dead {
+ break
+ }
+ p.Opt = alive
+ if p.As != obj.ACALL && p.To.Type == obj.TYPE_BRANCH && p.To.U.Branch != nil {
+ mark(p.To.U.Branch)
+ }
+ if p.As == obj.AJMP || p.As == obj.ARET || p.As == obj.AUNDEF {
+ break
+ }
+ }
+}
+
+func fixjmp(firstp *obj.Prog) {
+ var jmploop int
+ var p *obj.Prog
+ var last *obj.Prog
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("\nfixjmp\n")
+ }
+
+ // pass 1: resolve jump to jump, mark all code as dead.
+ jmploop = 0
+
+ for p = firstp; p != nil; p = p.Link {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("%v\n", p)
+ }
+ if p.As != obj.ACALL && p.To.Type == obj.TYPE_BRANCH && p.To.U.Branch != nil && p.To.U.Branch.As == obj.AJMP {
+ p.To.U.Branch = chasejmp(p.To.U.Branch, &jmploop)
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("->%v\n", p)
+ }
+ }
+
+ p.Opt = dead
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("\n")
+ }
+
+ // pass 2: mark all reachable code alive
+ mark(firstp)
+
+ // pass 3: delete dead code (mostly JMPs).
+ last = nil
+
+ for p = firstp; p != nil; p = p.Link {
+ if p.Opt == dead {
+ if p.Link == nil && p.As == obj.ARET && last != nil && last.As != obj.ARET {
+ // This is the final ARET, and the code so far doesn't have one.
+ // Let it stay. The register allocator assumes that all live code in
+ // the function can be traversed by starting at all the RET instructions
+ // and following predecessor links. If we remove the final RET,
+ // this assumption will not hold in the case of an infinite loop
+ // at the end of a function.
+ // Keep the RET but mark it dead for the liveness analysis.
+ p.Mode = 1
+ } else {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("del %v\n", p)
+ }
+ continue
+ }
+ }
+
+ if last != nil {
+ last.Link = p
+ }
+ last = p
+ }
+
+ last.Link = nil
+
+ // pass 4: elide JMP to next instruction.
+ // only safe if there are no jumps to JMPs anymore.
+ if !(jmploop != 0) {
+ last = nil
+ for p = firstp; p != nil; p = p.Link {
+ if p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH && p.To.U.Branch == p.Link {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("del %v\n", p)
+ }
+ continue
+ }
+
+ if last != nil {
+ last.Link = p
+ }
+ last = p
+ }
+
+ last.Link = nil
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("\n")
+ for p = firstp; p != nil; p = p.Link {
+ fmt.Printf("%v\n", p)
+ }
+ fmt.Printf("\n")
+ }
+}
+
+// Control flow analysis. The Flow structures hold predecessor and successor
+// information as well as basic loop analysis.
+//
+// graph = flowstart(firstp, 0);
+// ... use flow graph ...
+// flowend(graph); // free graph
+//
+// Typical uses of the flow graph are to iterate over all the flow-relevant instructions:
+//
+// for(f = graph->start; f != nil; f = f->link)
+//
+// or, given an instruction f, to iterate over all the predecessors, which is
+// f->p1 and this list:
+//
+// for(f2 = f->p2; f2 != nil; f2 = f2->p2link)
+//
+// The newData argument to Flowstart, if non-nil, is called to create the
+// zeroed data to store in every f->data field, for use by the client.
+// If newData == nil, f->data will be nil.
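+//
+// In this Go version the usage is (sketch; assumes the architecture
+// back end has been initialized so Thearch.Proginfo is available):
+//
+//	g := Flowstart(firstp, nil)
+//	if g != nil {
+//		for f := g.Start; f != nil; f = f.Link {
+//			// f.Prog is the instruction; f.S1/f.S2 are successors,
+//			// f.P1 plus the f.P2/f.P2link chain are predecessors.
+//		}
+//		Flowend(g)
+//	}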
+
+func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
+ var id int
+ var nf int
+ var f *Flow
+ var f1 *Flow
+ var start *Flow
+ var last *Flow
+ var graph *Graph
+ var p *obj.Prog
+ var info ProgInfo
+
+ // Count and mark instructions to annotate.
+ nf = 0
+
+ for p = firstp; p != nil; p = p.Link {
+ p.Opt = nil // should be already, but just in case
+ Thearch.Proginfo(&info, p)
+ if info.Flags&Skip != 0 {
+ continue
+ }
+ p.Opt = interface{}(1)
+ nf++
+ }
+
+ if nf == 0 {
+ return nil
+ }
+
+ if nf >= 20000 {
+ // fatal("%S is too big (%d instructions)", curfn->nname->sym, nf);
+ return nil
+ }
+
+ // Allocate annotations and assign to instructions.
+ graph = new(Graph)
+ ff := make([]Flow, nf)
+ start = &ff[0]
+ id = 0
+ for p = firstp; p != nil; p = p.Link {
+ if p.Opt == nil {
+ continue
+ }
+ f := &ff[0]
+ ff = ff[1:]
+ p.Opt = f
+ f.Prog = p
+ if last != nil {
+ last.Link = f
+ }
+ last = f
+ if newData != nil {
+ f.Data = newData()
+ }
+ f.Id = int32(id)
+ id++
+ }
+
+ // Fill in pred/succ information.
+ for f = start; f != nil; f = f.Link {
+ p = f.Prog
+ Thearch.Proginfo(&info, p)
+ if !(info.Flags&Break != 0) {
+ f1 = f.Link
+ f.S1 = f1
+ f1.P1 = f
+ }
+
+ if p.To.Type == obj.TYPE_BRANCH {
+ if p.To.U.Branch == nil {
+ Fatal("pnil %v", p)
+ }
+ f1 = p.To.U.Branch.Opt.(*Flow)
+ if f1 == nil {
+ Fatal("fnil %v / %v", p, p.To.U.Branch)
+ }
+ if f1 == f {
+ //fatal("self loop %P", p);
+ continue
+ }
+
+ f.S2 = f1
+ f.P2link = f1.P2
+ f1.P2 = f
+ }
+ }
+
+ graph.Start = start
+ graph.Num = nf
+ return graph
+}
+
+func Flowend(graph *Graph) {
+ var f *Flow
+
+ for f = graph.Start; f != nil; f = f.Link {
+ f.Prog.Opt = nil
+ }
+}
+
+/*
+ * find looping structure
+ *
+ * 1) find reverse postordering
+ * 2) find approximate dominators,
+ * the actual dominators if the flow graph is reducible
+ * otherwise, dominators plus some other non-dominators.
+ * See Matthew S. Hecht and Jeffrey D. Ullman,
+ * "Analysis of a Simple Algorithm for Global Data Flow Problems",
+ * Conf. Record of ACM Symp. on Principles of Prog. Langs, Boston, Massachusetts,
+ * Oct. 1-3, 1973, pp. 207-217.
+ * 3) find all nodes with a predecessor dominated by the current node.
+ * such a node is a loop head.
+ * recursively, all preds with a greater rpo number are in the loop
+ */
+func postorder(r *Flow, rpo2r []*Flow, n int32) int32 {
+ var r1 *Flow
+
+ r.Rpo = 1
+ r1 = r.S1
+ if r1 != nil && !(r1.Rpo != 0) {
+ n = postorder(r1, rpo2r, n)
+ }
+ r1 = r.S2
+ if r1 != nil && !(r1.Rpo != 0) {
+ n = postorder(r1, rpo2r, n)
+ }
+ rpo2r[n] = r
+ n++
+ return n
+}
+
+func rpolca(idom []int32, rpo1 int32, rpo2 int32) int32 {
+ var t int32
+
+ if rpo1 == -1 {
+ return rpo2
+ }
+ for rpo1 != rpo2 {
+ if rpo1 > rpo2 {
+ t = rpo2
+ rpo2 = rpo1
+ rpo1 = t
+ }
+
+ for rpo1 < rpo2 {
+ t = idom[rpo2]
+ if t >= rpo2 {
+ Fatal("bad idom")
+ }
+ rpo2 = t
+ }
+ }
+
+ return rpo1
+}
+
+func doms(idom []int32, r int32, s int32) int {
+ for s > r {
+ s = idom[s]
+ }
+ return bool2int(s == r)
+}
+
+func loophead(idom []int32, r *Flow) int {
+ var src int32
+
+ src = r.Rpo
+ if r.P1 != nil && doms(idom, src, r.P1.Rpo) != 0 {
+ return 1
+ }
+ for r = r.P2; r != nil; r = r.P2link {
+ if doms(idom, src, r.Rpo) != 0 {
+ return 1
+ }
+ }
+ return 0
+}
+
+func loopmark(rpo2r **Flow, head int32, r *Flow) {
+ if r.Rpo < head || r.Active == head {
+ return
+ }
+ r.Active = head
+ r.Loop += LOOP
+ if r.P1 != nil {
+ loopmark(rpo2r, head, r.P1)
+ }
+ for r = r.P2; r != nil; r = r.P2link {
+ loopmark(rpo2r, head, r)
+ }
+}
+
+func flowrpo(g *Graph) {
+ var r1 *Flow
+ var i int32
+ var d int32
+ var me int32
+ var nr int32
+ var idom []int32
+ var rpo2r []*Flow
+
+ g.Rpo = make([]*Flow, g.Num)
+ idom = make([]int32, g.Num)
+
+ for r1 = g.Start; r1 != nil; r1 = r1.Link {
+ r1.Active = 0
+ }
+
+ rpo2r = g.Rpo
+ d = postorder(g.Start, rpo2r, 0)
+ nr = int32(g.Num)
+ if d > nr {
+ Fatal("too many reg nodes %d %d", d, nr)
+ }
+ nr = d
+ for i = 0; i < nr/2; i++ {
+ r1 = rpo2r[i]
+ rpo2r[i] = rpo2r[nr-1-i]
+ rpo2r[nr-1-i] = r1
+ }
+
+ for i = 0; i < nr; i++ {
+ rpo2r[i].Rpo = i
+ }
+
+ idom[0] = 0
+ for i = 0; i < nr; i++ {
+ r1 = rpo2r[i]
+ me = r1.Rpo
+ d = -1
+
+ // rpo2r[r->rpo] == r protects against considering dead code,
+ // which has r->rpo == 0.
+ if r1.P1 != nil && rpo2r[r1.P1.Rpo] == r1.P1 && r1.P1.Rpo < me {
+ d = r1.P1.Rpo
+ }
+ for r1 = r1.P2; r1 != nil; r1 = r1.P2link {
+ if rpo2r[r1.Rpo] == r1 && r1.Rpo < me {
+ d = rpolca(idom, d, r1.Rpo)
+ }
+ }
+ idom[i] = d
+ }
+
+ for i = 0; i < nr; i++ {
+ r1 = rpo2r[i]
+ r1.Loop++
+ if r1.P2 != nil && loophead(idom, r1) != 0 {
+ loopmark(&rpo2r[0], i, r1)
+ }
+ }
+
+ for r1 = g.Start; r1 != nil; r1 = r1.Link {
+ r1.Active = 0
+ }
+}
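+
+// Worked example (illustrative): for a graph B0 -> B1 -> B2 -> B1, the
+// reverse postorder is B0=0, B1=1, B2=2 with idom = [0, 0, 1]. B1 has the
+// back-edge predecessor B2, and doms(idom, 1, 2) walks 2 -> idom[2] = 1,
+// so loophead reports B1; loopmark then adds LOOP to the Loop cost of B1
+// and B2, which the register allocator uses to favor variables referenced
+// inside loops.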
+
+func Uniqp(r *Flow) *Flow {
+ var r1 *Flow
+
+ r1 = r.P1
+ if r1 == nil {
+ r1 = r.P2
+ if r1 == nil || r1.P2link != nil {
+ return nil
+ }
+ } else if r.P2 != nil {
+ return nil
+ }
+ return r1
+}
+
+func Uniqs(r *Flow) *Flow {
+ var r1 *Flow
+
+ r1 = r.S1
+ if r1 == nil {
+ r1 = r.S2
+ if r1 == nil {
+ return nil
+ }
+ } else if r.S2 != nil {
+ return nil
+ }
+ return r1
+}
+
+// The compilers assume they can generate temporary variables
+// as needed to preserve the right semantics or simplify code
+// generation and the back end will still generate good code.
+// This results in a large number of ephemeral temporary variables.
+// Merge temps with non-overlapping lifetimes and equal types using the
+// greedy algorithm in Poletto and Sarkar, "Linear Scan Register Allocation",
+// ACM TOPLAS 1999.
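+//
+// A minimal sketch of that greedy scheme (hedged; interval and assignSlots
+// are illustrative names, not the TempVar machinery below): sort intervals
+// by start, expire active intervals that end before the new start, and
+// reuse an expired slot when one is available:
+//
+//	type interval struct{ start, end int64 }
+//
+//	func assignSlots(byStart []interval) []int {
+//		slot := make([]int, len(byStart))
+//		var free []int            // expired slots, available for reuse
+//		active := map[int]int64{} // slot number -> interval end
+//		nslot := 0
+//		for i, iv := range byStart {
+//			for s, end := range active {
+//				if end < iv.start { // lifetime over: recycle slot
+//					delete(active, s)
+//					free = append(free, s)
+//				}
+//			}
+//			if n := len(free); n > 0 {
+//				slot[i], free = free[n-1], free[:n-1]
+//			} else {
+//				slot[i] = nslot
+//				nslot++
+//			}
+//			active[slot[i]] = iv.end
+//		}
+//		return slot
+//	}
+//
+// mergetemp refines this by also requiring Eqtype types and matching
+// Addrtaken bits before two temporaries may share a slot.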
+
+type TempVar struct {
+ node *Node
+ def *Flow
+ use *Flow
+ freelink *TempVar
+ merge *TempVar
+ start int64
+ end int64
+ addr uint8
+ removed uint8
+}
+
+type startcmp []*TempVar
+
+func (x startcmp) Len() int {
+ return len(x)
+}
+
+func (x startcmp) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+func (x startcmp) Less(i, j int) bool {
+ var a *TempVar
+ var b *TempVar
+
+ a = x[i]
+ b = x[j]
+
+ if a.start < b.start {
+ return true
+ }
+ if a.start > b.start {
+ return false
+ }
+
+ // Order what's left by id or symbol name,
+ // just so that sort is forced into a specific ordering,
+ // so that the result of the sort does not depend on
+ // the sort implementation.
+ if a.def != b.def {
+ return int(a.def.Id-b.def.Id) < 0
+ }
+ if a.node != b.node {
+ return stringsCompare(a.node.Sym.Name, b.node.Sym.Name) < 0
+ }
+ return false
+}
+
+// Is n available for merging?
+func canmerge(n *Node) int {
+ return bool2int(n.Class == PAUTO && strings.HasPrefix(n.Sym.Name, "autotmp"))
+}
+
+func mergetemp(firstp *obj.Prog) {
+ var i int
+ var j int
+ var nvar int
+ var ninuse int
+ var nfree int
+ var nkill int
+ var var_ []TempVar
+ var v *TempVar
+ var v1 *TempVar
+ var bystart []*TempVar
+ var inuse []*TempVar
+ var f *Flow
+ var l *NodeList
+ var lp **NodeList
+ var n *Node
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var t *Type
+ var info ProgInfo
+ var info1 ProgInfo
+ var gen int32
+ var g *Graph
+ const (
+ debugmerge = 1
+ )
+
+ g = Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+
+ // Build list of all mergeable variables.
+ nvar = 0
+ for l = Curfn.Dcl; l != nil; l = l.Next {
+ if canmerge(l.N) != 0 {
+ nvar++
+ }
+ }
+
+ var_ = make([]TempVar, nvar)
+ nvar = 0
+ for l = Curfn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if canmerge(n) != 0 {
+ v = &var_[nvar]
+ nvar++
+ n.Opt = v
+ v.node = n
+ }
+ }
+
+ // Build list of uses.
+ // We assume that the earliest reference to a temporary is its definition.
+ // This is not true of variables in general but our temporaries are all
+ // single-use (that's why we have so many!).
+ for f = g.Start; f != nil; f = f.Link {
+ p = f.Prog
+ Thearch.Proginfo(&info, p)
+
+ if p.From.Node != nil && ((p.From.Node).(*Node)).Opt != nil && p.To.Node != nil && ((p.To.Node).(*Node)).Opt != nil {
+ Fatal("double node %v", p)
+ }
+ v = nil
+ n, _ = p.From.Node.(*Node)
+ if n != nil {
+ v, _ = n.Opt.(*TempVar)
+ }
+ if v == nil {
+ n, _ = p.To.Node.(*Node)
+ if n != nil {
+ v, _ = n.Opt.(*TempVar)
+ }
+ }
+ if v != nil {
+ if v.def == nil {
+ v.def = f
+ }
+ f.Data = v.use
+ v.use = f
+ if n == p.From.Node && (info.Flags&LeftAddr != 0) {
+ v.addr = 1
+ }
+ }
+ }
+
+ if debugmerge > 1 && Debug['v'] != 0 {
+ Dumpit("before", g.Start, 0)
+ }
+
+ nkill = 0
+
+ // Special case.
+ for i = 0; i < len(var_); i++ {
+ v = &var_[i]
+ if v.addr != 0 {
+ continue
+ }
+
+ // Used in only one instruction, which had better be a write.
+ f = v.use
+ if f != nil && f.Data.(*Flow) == nil {
+ p = f.Prog
+ Thearch.Proginfo(&info, p)
+ if p.To.Node == v.node && (info.Flags&RightWrite != 0) && !(info.Flags&RightRead != 0) {
+ p.As = obj.ANOP
+ p.To = obj.Zprog.To
+ v.removed = 1
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("drop write-only %v\n", Sconv(v.node.Sym, 0))
+ }
+ } else {
+ Fatal("temp used and not set: %v", p)
+ }
+ nkill++
+ continue
+ }
+
+ // Written in one instruction, read in the next, otherwise unused;
+ // no jumps to the next instruction. Happens mainly in the 386 compiler.
+ f = v.use
+ if f != nil && f.Link == f.Data.(*Flow) && (f.Data.(*Flow)).Data.(*Flow) == nil && Uniqp(f.Link) == f {
+ p = f.Prog
+ Thearch.Proginfo(&info, p)
+ p1 = f.Link.Prog
+ Thearch.Proginfo(&info1, p1)
+ const (
+ SizeAny = SizeB | SizeW | SizeL | SizeQ | SizeF | SizeD
+ )
+ if p.From.Node == v.node && p1.To.Node == v.node && (info.Flags&Move != 0) && !((info.Flags|info1.Flags)&(LeftAddr|RightAddr) != 0) && info.Flags&SizeAny == info1.Flags&SizeAny {
+ p1.From = p.From
+ Thearch.Excise(f)
+ v.removed = 1
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("drop immediate-use %v\n", Sconv(v.node.Sym, 0))
+ }
+ }
+
+ nkill++
+ continue
+ }
+ }
+
+ // Traverse live range of each variable to set start, end.
+ // Each flood uses a new value of gen so that we don't have
+ // to clear all the r->active words after each variable.
+ gen = 0
+
+ for i = 0; i < len(var_); i++ {
+ v = &var_[i]
+ gen++
+ for f = v.use; f != nil; f = f.Data.(*Flow) {
+ mergewalk(v, f, uint32(gen))
+ }
+ if v.addr != 0 {
+ gen++
+ for f = v.use; f != nil; f = f.Data.(*Flow) {
+ varkillwalk(v, f, uint32(gen))
+ }
+ }
+ }
+
+ // Sort variables by start.
+ bystart = make([]*TempVar, len(var_))
+
+ for i = 0; i < len(var_); i++ {
+ bystart[i] = &var_[i]
+ }
+ sort.Sort(startcmp(bystart[:len(var_)]))
+
+ // List of in-use variables, sorted by end, so that the ones that
+ // will last the longest are the earliest ones in the array.
+ // The tail inuse[nfree:] holds no-longer-used variables.
+ // In theory we should use a sorted tree so that insertions are
+ // guaranteed O(log n) and then the loop is guaranteed O(n log n).
+ // In practice, it doesn't really matter.
+ inuse = make([]*TempVar, len(var_))
+
+ ninuse = 0
+ nfree = len(var_)
+ for i = 0; i < len(var_); i++ {
+ v = bystart[i]
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("consider %v: removed=%d\n", Nconv(v.node, obj.FmtSharp), v.removed)
+ }
+
+ if v.removed != 0 {
+ continue
+ }
+
+ // Expire no longer in use.
+ for ninuse > 0 && inuse[ninuse-1].end < v.start {
+ ninuse--
+ v1 = inuse[ninuse]
+ nfree--
+ inuse[nfree] = v1
+ }
+
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("consider %v: removed=%d nfree=%d nvar=%d\n", Nconv(v.node, obj.FmtSharp), v.removed, nfree, len(var_))
+ }
+
+ // Find old temp to reuse if possible.
+ t = v.node.Type
+
+ for j = nfree; j < len(var_); j++ {
+ v1 = inuse[j]
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("consider %v: maybe %v: type=%v,%v addrtaken=%d,%d\n", Nconv(v.node, obj.FmtSharp), Nconv(v1.node, obj.FmtSharp), Tconv(t, 0), Tconv(v1.node.Type, 0), v.node.Addrtaken, v1.node.Addrtaken)
+ }
+
+ // Require the types to match but also require the addrtaken bits to match.
+ // If a variable's address is taken, that disables registerization for the individual
+ // words of the variable (for example, the base,len,cap of a slice).
+ // We don't want to merge a non-addressed var with an addressed one and
+ // inhibit registerization of the former.
+ if Eqtype(t, v1.node.Type) && v.node.Addrtaken == v1.node.Addrtaken {
+ inuse[j] = inuse[nfree]
+ nfree++
+ if v1.merge != nil {
+ v.merge = v1.merge
+ } else {
+ v.merge = v1
+ }
+ nkill++
+ break
+ }
+ }
+
+ // Sort v into inuse.
+ j = ninuse
+ ninuse++
+
+ for j > 0 && inuse[j-1].end < v.end {
+ inuse[j] = inuse[j-1]
+ j--
+ }
+
+ inuse[j] = v
+ }
+
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("%v [%d - %d]\n", Sconv(Curfn.Nname.Sym, 0), len(var_), nkill)
+ for i = 0; i < len(var_); i++ {
+ v = &var_[i]
+ fmt.Printf("var %v %v %d-%d", Nconv(v.node, obj.FmtSharp), Tconv(v.node.Type, 0), v.start, v.end)
+ if v.addr != 0 {
+ fmt.Printf(" addr=1")
+ }
+ if v.removed != 0 {
+ fmt.Printf(" dead=1")
+ }
+ if v.merge != nil {
+ fmt.Printf(" merge %v", Nconv(v.merge.node, obj.FmtSharp))
+ }
+ if v.start == v.end && v.def != nil {
+ fmt.Printf(" %v", v.def.Prog)
+ }
+ fmt.Printf("\n")
+ }
+
+ if debugmerge > 1 && Debug['v'] != 0 {
+ Dumpit("after", g.Start, 0)
+ }
+ }
+
+ // Update node references to use merged temporaries.
+ for f = g.Start; f != nil; f = f.Link {
+ p = f.Prog
+ n, _ = p.From.Node.(*Node)
+ if n != nil {
+ v, _ = n.Opt.(*TempVar)
+ if v != nil && v.merge != nil {
+ p.From.Node = v.merge.node
+ }
+ }
+ n, _ = p.To.Node.(*Node)
+ if n != nil {
+ v, _ = n.Opt.(*TempVar)
+ if v != nil && v.merge != nil {
+ p.To.Node = v.merge.node
+ }
+ }
+ }
+
+ // Delete merged nodes from declaration list.
+ for lp = &Curfn.Dcl; ; {
+ l = *lp
+ if !(l != nil) {
+ break
+ }
+
+ Curfn.Dcl.End = l
+ n = l.N
+ v, _ = n.Opt.(*TempVar)
+ if v != nil && (v.merge != nil || v.removed != 0) {
+ *lp = l.Next
+ continue
+ }
+
+ lp = &l.Next
+ }
+
+ // Clear aux structures.
+ for i = 0; i < len(var_); i++ {
+ var_[i].node.Opt = nil
+ }
+
+ Flowend(g)
+}
+
+func mergewalk(v *TempVar, f0 *Flow, gen uint32) {
+ var p *obj.Prog
+ var f1 *Flow
+ var f *Flow
+ var f2 *Flow
+
+ for f1 = f0; f1 != nil; f1 = f1.P1 {
+ if uint32(f1.Active) == gen {
+ break
+ }
+ f1.Active = int32(gen)
+ p = f1.Prog
+ if v.end < p.Pc {
+ v.end = p.Pc
+ }
+ if f1 == v.def {
+ v.start = p.Pc
+ break
+ }
+ }
+
+ for f = f0; f != f1; f = f.P1 {
+ for f2 = f.P2; f2 != nil; f2 = f2.P2link {
+ mergewalk(v, f2, gen)
+ }
+ }
+}
+
+func varkillwalk(v *TempVar, f0 *Flow, gen uint32) {
+ var p *obj.Prog
+ var f1 *Flow
+ var f *Flow
+
+ for f1 = f0; f1 != nil; f1 = f1.S1 {
+ if uint32(f1.Active) == gen {
+ break
+ }
+ f1.Active = int32(gen)
+ p = f1.Prog
+ if v.end < p.Pc {
+ v.end = p.Pc
+ }
+ if v.start > p.Pc {
+ v.start = p.Pc
+ }
+ if p.As == obj.ARET || (p.As == obj.AVARKILL && p.To.Node == v.node) {
+ break
+ }
+ }
+
+ for f = f0; f != f1; f = f.S1 {
+ varkillwalk(v, f.S2, gen)
+ }
+}
+
+// Eliminate redundant nil pointer checks.
+//
+// The code generation pass emits a CHECKNIL for every possibly nil pointer.
+// This pass removes a CHECKNIL if every predecessor path has already
+// checked this value for nil.
+//
+// Simple backwards flood from check to definition.
+// Run prog loop backward from end of program to beginning to avoid quadratic
+// behavior removing a run of checks.
+//
+// Assume that stack variables with address not taken can be loaded multiple times
+// from memory without being rechecked. Other variables need to be checked on
+// each load.
+type NilVar struct {
+}
+
+var killed int // f->data is either nil or &killed
+
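+// For example (illustrative pseudo-assembly): in
+//
+//	CHECKNIL AX
+//	MOVQ 8(AX), BX    // small-offset load through AX
+//	CHECKNIL AX
+//
+// nilwalkfwd kills the first check, because the load right after it would
+// fault on a nil AX anyway, and nilwalkback kills the second, because AX
+// has already been checked on every path reaching it.
+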
+func nilopt(firstp *obj.Prog) {
+ var f *Flow
+ var p *obj.Prog
+ var g *Graph
+ var ncheck int
+ var nkill int
+
+ g = Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+
+ if Debug_checknil > 1 { /* || strcmp(curfn->nname->sym->name, "f1") == 0 */
+ Dumpit("nilopt", g.Start, 0)
+ }
+
+ ncheck = 0
+ nkill = 0
+ for f = g.Start; f != nil; f = f.Link {
+ p = f.Prog
+ if p.As != obj.ACHECKNIL || !(Thearch.Regtyp(&p.From) != 0) {
+ continue
+ }
+ ncheck++
+ if Thearch.Stackaddr(&p.From) != 0 {
+ if Debug_checknil != 0 && p.Lineno > 1 {
+ Warnl(int(p.Lineno), "removed nil check of SP address")
+ }
+ f.Data = &killed
+ continue
+ }
+
+ nilwalkfwd(f)
+ if f.Data != nil {
+ if Debug_checknil != 0 && p.Lineno > 1 {
+ Warnl(int(p.Lineno), "removed nil check before indirect")
+ }
+ continue
+ }
+
+ nilwalkback(f)
+ if f.Data != nil {
+ if Debug_checknil != 0 && p.Lineno > 1 {
+ Warnl(int(p.Lineno), "removed repeated nil check")
+ }
+ continue
+ }
+ }
+
+ for f = g.Start; f != nil; f = f.Link {
+ if f.Data != nil {
+ nkill++
+ Thearch.Excise(f)
+ }
+ }
+
+ Flowend(g)
+
+ if Debug_checknil > 1 {
+ fmt.Printf("%v: removed %d of %d nil checks\n", Sconv(Curfn.Nname.Sym, 0), nkill, ncheck)
+ }
+}
+
+func nilwalkback(fcheck *Flow) {
+ var p *obj.Prog
+ var info ProgInfo
+ var f *Flow
+
+ for f = fcheck; f != nil; f = Uniqp(f) {
+ p = f.Prog
+ Thearch.Proginfo(&info, p)
+ if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) != 0 {
+ // Found initialization of the value we're checking for nil,
+ // without first finding the check, so this one is unchecked.
+ return
+ }
+
+ if f != fcheck && p.As == obj.ACHECKNIL && Thearch.Sameaddr(&p.From, &fcheck.Prog.From) != 0 {
+ fcheck.Data = &killed
+ return
+ }
+ }
+}
+
+// Here is a more complex version that scans backward across branches.
+// It assumes fcheck->kill = 1 has been set on entry, and its job is to find a reason
+// to keep the check (setting fcheck->kill = 0).
+// It doesn't handle copying of aggregates as well as I would like,
+// nor variables with their address taken,
+// and it's too subtle to turn on this late in Go 1.2. Perhaps for Go 1.3.
+/*
+for(f1 = f0; f1 != nil; f1 = f1->p1) {
+ if(f1->active == gen)
+ break;
+ f1->active = gen;
+ p = f1->prog;
+
+ // If same check, stop this loop but still check
+ // alternate predecessors up to this point.
+ if(f1 != fcheck && p->as == ACHECKNIL && thearch.sameaddr(&p->from, &fcheck->prog->from))
+ break;
+
+ thearch.proginfo(&info, p);
+ if((info.flags & RightWrite) && thearch.sameaddr(&p->to, &fcheck->prog->from)) {
+ // Found initialization of value we're checking for nil.
+ // without first finding the check, so this one is unchecked.
+ fcheck->kill = 0;
+ return;
+ }
+
+ if(f1->p1 == nil && f1->p2 == nil) {
+ print("lost pred for %P\n", fcheck->prog);
+ for(f1=f0; f1!=nil; f1=f1->p1) {
+ thearch.proginfo(&info, f1->prog);
+ print("\t%P %d %d %D %D\n", r1->prog, info.flags&RightWrite, thearch.sameaddr(&f1->prog->to, &fcheck->prog->from), &f1->prog->to, &fcheck->prog->from);
+ }
+ fatal("lost pred trail");
+ }
+}
+
+for(f = f0; f != f1; f = f->p1)
+ for(f2 = f->p2; f2 != nil; f2 = f2->p2link)
+ nilwalkback(fcheck, f2, gen);
+*/
+func nilwalkfwd(fcheck *Flow) {
+ var f *Flow
+ var last *Flow
+ var p *obj.Prog
+ var info ProgInfo
+
+ // If the path down from fcheck dereferences the address
+ // (possibly with a small offset) before writing to memory
+ // and before any subsequent checks, it's okay to wait for
+ // that implicit check. Only consider this basic block to
+ // avoid problems like:
+ // _ = *x // should panic
+ // for {} // no writes but infinite loop may be considered visible
+ last = nil
+
+ for f = Uniqs(fcheck); f != nil; f = Uniqs(f) {
+ p = f.Prog
+ Thearch.Proginfo(&info, p)
+
+ if (info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) != 0 {
+ fcheck.Data = &killed
+ return
+ }
+
+ if (info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) != 0 {
+ fcheck.Data = &killed
+ return
+ }
+
+ // Stop if another nil check happens.
+ if p.As == obj.ACHECKNIL {
+ return
+ }
+
+ // Stop if value is lost.
+ if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) != 0 {
+ return
+ }
+
+ // Stop if memory write.
+ if (info.Flags&RightWrite != 0) && !(Thearch.Regtyp(&p.To) != 0) {
+ return
+ }
+
+ // Stop if we jump backward.
+ if last != nil && f.Id <= last.Id {
+ return
+ }
+ last = f
+ }
+}
diff --git a/src/cmd/internal/gc/racewalk.go b/src/cmd/internal/gc/racewalk.go
new file mode 100644
index 0000000000..fe7a82cf72
--- /dev/null
+++ b/src/cmd/internal/gc/racewalk.go
@@ -0,0 +1,681 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "strings"
+)
+
+// The racewalk pass modifies the code tree for the function as follows:
+//
+// 1. It inserts a call to racefuncenter at the beginning of each function.
+// 2. It inserts a call to racefuncexit at the end of each function.
+// 3. It inserts a call to raceread before each memory read.
+// 4. It inserts a call to racewrite before each memory write.
+//
+// The rewriting is not yet complete. Certain nodes are not rewritten
+// but should be.
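+//
+// For example (illustrative; the real calls are inserted as tree nodes,
+// and racefuncenter takes the caller PC), a body like
+//
+//	func f(p *int) { *p = 1 }
+//
+// is conceptually rewritten to
+//
+//	func f(p *int) {
+//		racefuncenter(callerpc)
+//		racewrite(unsafe.Pointer(p))
+//		*p = 1
+//		racefuncexit()
+//	}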
+
+// TODO(dvyukov): do not instrument initialization as writes:
+// a := make([]int, 10)
+
+// Do not instrument the following packages at all,
+// at best instrumentation would cause infinite recursion.
+var omit_pkgs = []string{"runtime", "runtime/race"}
+
+// Only insert racefuncenter/racefuncexit into the following packages.
+// Memory accesses in the packages are either uninteresting or will cause false positives.
+var noinst_pkgs = []string{"sync", "sync/atomic"}
+
+func ispkgin(pkgs []string) int {
+ var i int
+
+ if myimportpath != "" {
+ for i = 0; i < len(pkgs); i++ {
+ if myimportpath == pkgs[i] {
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+func isforkfunc(fn *Node) int {
+ // Special case for syscall.forkAndExecInChild.
+ // In the child, this function must not acquire any locks, because
+ // they might have been locked at the time of the fork. This means
+ // no rescheduling, no malloc calls, and no new stack segments.
+ // Race instrumentation does all of the above.
+ return bool2int(myimportpath != "" && myimportpath == "syscall" && fn.Nname.Sym.Name == "forkAndExecInChild")
+}
+
+func racewalk(fn *Node) {
+ var nd *Node
+ var nodpc *Node
+ var s string
+
+ if ispkgin(omit_pkgs) != 0 || isforkfunc(fn) != 0 {
+ return
+ }
+
+ if !(ispkgin(noinst_pkgs) != 0) {
+ racewalklist(fn.Nbody, nil)
+
+ // nothing interesting for race detector in fn->enter
+ racewalklist(fn.Exit, nil)
+ }
+
+ // nodpc is the PC of the caller as extracted by
+ // getcallerpc. We use -widthptr(FP) for x86.
+ // BUG: this will not work on arm.
+ nodpc = Nod(OXXX, nil, nil)
+
+ *nodpc = *nodfp
+ nodpc.Type = Types[TUINTPTR]
+ nodpc.Xoffset = int64(-Widthptr)
+ nd = mkcall("racefuncenter", nil, nil, nodpc)
+ fn.Enter = concat(list1(nd), fn.Enter)
+ nd = mkcall("racefuncexit", nil, nil)
+ fn.Exit = list(fn.Exit, nd)
+
+ if Debug['W'] != 0 {
+ s = fmt.Sprintf("after racewalk %v", Sconv(fn.Nname.Sym, 0))
+ dumplist(s, fn.Nbody)
+ s = fmt.Sprintf("enter %v", Sconv(fn.Nname.Sym, 0))
+ dumplist(s, fn.Enter)
+ s = fmt.Sprintf("exit %v", Sconv(fn.Nname.Sym, 0))
+ dumplist(s, fn.Exit)
+ }
+}
+
+func racewalklist(l *NodeList, init **NodeList) {
+ var instr *NodeList
+
+ for ; l != nil; l = l.Next {
+ instr = nil
+ racewalknode(&l.N, &instr, 0, 0)
+ if init == nil {
+ l.N.Ninit = concat(l.N.Ninit, instr)
+ } else {
+ *init = concat(*init, instr)
+ }
+ }
+}
+
+// racewalknode is walkexpr and walkstmt combined:
+// it walks the tree and adds calls to the
+// instrumentation code to top-level (statement) nodes' init lists.
+func racewalknode(np **Node, init **NodeList, wr int, skip int) {
+ var n *Node
+ var n1 *Node
+ var l *NodeList
+ var fini *NodeList
+
+ n = *np
+
+ if n == nil {
+ return
+ }
+
+ if Debug['w'] > 1 {
+ Dump("racewalk-before", n)
+ }
+ setlineno(n)
+ if init == nil {
+ Fatal("racewalk: bad init list")
+ }
+ if init == &n.Ninit {
+ // If init == &n->ninit and n->ninit is non-nil,
+ // racewalknode might append it to itself.
+ // nil it out and handle it separately before putting it back.
+ l = n.Ninit
+
+ n.Ninit = nil
+ racewalklist(l, nil)
+ racewalknode(&n, &l, wr, skip) // recurse with nil n->ninit
+ appendinit(&n, l)
+ *np = n
+ return
+ }
+
+ racewalklist(n.Ninit, nil)
+
+ switch n.Op {
+ default:
+ Fatal("racewalk: unknown node type %v", Oconv(int(n.Op), 0))
+ fallthrough
+
+ case OAS,
+ OAS2FUNC:
+ racewalknode(&n.Left, init, 1, 0)
+ racewalknode(&n.Right, init, 0, 0)
+ goto ret
+
+ // can't matter
+ case OCFUNC,
+ OVARKILL:
+ goto ret
+
+ case OBLOCK:
+ if n.List == nil {
+ goto ret
+ }
+
+ switch n.List.N.Op {
+ // Blocks are used for multiple return function calls.
+ // x, y := f() becomes BLOCK{CALL f, AS x [SP+0], AS y [SP+n]}
+ // We don't want to instrument between the statements because it will
+ // smash the results.
+ case OCALLFUNC,
+ OCALLMETH,
+ OCALLINTER:
+ racewalknode(&n.List.N, &n.List.N.Ninit, 0, 0)
+
+ fini = nil
+ racewalklist(n.List.Next, &fini)
+ n.List = concat(n.List, fini)
+
+ // Ordinary block, for loop initialization or inlined bodies.
+ default:
+ racewalklist(n.List, nil)
+ }
+
+ goto ret
+
+ case ODEFER:
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ case OPROC:
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ case OCALLINTER:
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ // Instrument dst argument of runtime.writebarrier* calls
+ // as we do not instrument runtime code.
+ // typedslicecopy is instrumented in runtime.
+ case OCALLFUNC:
+ if n.Left.Sym != nil && n.Left.Sym.Pkg == Runtimepkg && (strings.HasPrefix(n.Left.Sym.Name, "writebarrier") || n.Left.Sym.Name == "typedmemmove") {
+ // Find the dst argument.
+ // The list can be reordered, so it is not necessarily the first or the second element.
+ for l = n.List; l != nil; l = l.Next {
+ if n.Left.Sym.Name == "typedmemmove" {
+ if l.N.Left.Xoffset == int64(Widthptr) {
+ break
+ }
+ } else {
+ if l.N.Left.Xoffset == 0 {
+ break
+ }
+ }
+ }
+
+ if l == nil {
+ Fatal("racewalk: writebarrier no arg")
+ }
+ if l.N.Right.Op != OADDR {
+ Fatal("racewalk: writebarrier bad arg")
+ }
+ callinstr(&l.N.Right.Left, init, 1, 0)
+ }
+
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ case ONOT,
+ OMINUS,
+ OPLUS,
+ OREAL,
+ OIMAG,
+ OCOM:
+ racewalknode(&n.Left, init, wr, 0)
+ goto ret
+
+ case ODOTINTER:
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ case ODOT:
+ racewalknode(&n.Left, init, 0, 1)
+ callinstr(&n, init, wr, skip)
+ goto ret
+
+ case ODOTPTR: // dst = (*x).f with implicit *; otherwise it's ODOT+OIND
+ racewalknode(&n.Left, init, 0, 0)
+
+ callinstr(&n, init, wr, skip)
+ goto ret
+
+ case OIND: // *p
+ racewalknode(&n.Left, init, 0, 0)
+
+ callinstr(&n, init, wr, skip)
+ goto ret
+
+ case OSPTR,
+ OLEN,
+ OCAP:
+ racewalknode(&n.Left, init, 0, 0)
+ if Istype(n.Left.Type, TMAP) != 0 {
+ n1 = Nod(OCONVNOP, n.Left, nil)
+ n1.Type = Ptrto(Types[TUINT8])
+ n1 = Nod(OIND, n1, nil)
+ typecheck(&n1, Erv)
+ callinstr(&n1, init, 0, skip)
+ }
+
+ goto ret
+
+ case OLSH,
+ ORSH,
+ OLROT,
+ OAND,
+ OANDNOT,
+ OOR,
+ OXOR,
+ OSUB,
+ OMUL,
+ OHMUL,
+ OEQ,
+ ONE,
+ OLT,
+ OLE,
+ OGE,
+ OGT,
+ OADD,
+ OCOMPLEX:
+ racewalknode(&n.Left, init, wr, 0)
+ racewalknode(&n.Right, init, wr, 0)
+ goto ret
+
+ case OANDAND,
+ OOROR:
+ racewalknode(&n.Left, init, wr, 0)
+
+ // walk has ensured the node has moved to a location where
+ // side effects are safe.
+ // n->right may not be executed,
+ // so instrumentation goes to n->right->ninit, not init.
+ racewalknode(&n.Right, &n.Right.Ninit, wr, 0)
+
+ goto ret
+
+ case ONAME:
+ callinstr(&n, init, wr, skip)
+ goto ret
+
+ case OCONV:
+ racewalknode(&n.Left, init, wr, 0)
+ goto ret
+
+ case OCONVNOP:
+ racewalknode(&n.Left, init, wr, 0)
+ goto ret
+
+ case ODIV,
+ OMOD:
+ racewalknode(&n.Left, init, wr, 0)
+ racewalknode(&n.Right, init, wr, 0)
+ goto ret
+
+ case OINDEX:
+ if !(Isfixedarray(n.Left.Type) != 0) {
+ racewalknode(&n.Left, init, 0, 0)
+ } else if !(islvalue(n.Left) != 0) {
+ // index of unaddressable array, like Map[k][i].
+ racewalknode(&n.Left, init, wr, 0)
+
+ racewalknode(&n.Right, init, 0, 0)
+ goto ret
+ }
+
+ racewalknode(&n.Right, init, 0, 0)
+ if n.Left.Type.Etype != TSTRING {
+ callinstr(&n, init, wr, skip)
+ }
+ goto ret
+
+ // Seems to only lead to double instrumentation.
+ //racewalknode(&n->left, init, 0, 0);
+ case OSLICE,
+ OSLICEARR,
+ OSLICE3,
+ OSLICE3ARR:
+ goto ret
+
+ case OADDR:
+ racewalknode(&n.Left, init, 0, 1)
+ goto ret
+
+ // n->left is Type* which is not interesting.
+ case OEFACE:
+ racewalknode(&n.Right, init, 0, 0)
+
+ goto ret
+
+ case OITAB:
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ // should not appear in AST by now
+ case OSEND,
+ ORECV,
+ OCLOSE,
+ ONEW,
+ OXCASE,
+ OXFALL,
+ OCASE,
+ OPANIC,
+ ORECOVER,
+ OCONVIFACE,
+ OCMPIFACE,
+ OMAKECHAN,
+ OMAKEMAP,
+ OMAKESLICE,
+ OCALL,
+ OCOPY,
+ OAPPEND,
+ ORUNESTR,
+ OARRAYBYTESTR,
+ OARRAYRUNESTR,
+ OSTRARRAYBYTE,
+ OSTRARRAYRUNE,
+ OINDEXMAP,
+ // lowered to call
+ OCMPSTR,
+ OADDSTR,
+ ODOTTYPE,
+ ODOTTYPE2,
+ OAS2DOTTYPE,
+ OCALLPART, // lowered to PTRLIT
+ OCLOSURE, // lowered to PTRLIT
+ ORANGE, // lowered to ordinary for loop
+ OARRAYLIT, // lowered to assignments
+ OMAPLIT,
+ OSTRUCTLIT,
+ OAS2,
+ OAS2RECV,
+ OAS2MAPR,
+ OASOP:
+ Yyerror("racewalk: %v must be lowered by now", Oconv(int(n.Op), 0))
+
+ goto ret
+
+ // impossible nodes: only appear in backend.
+ case ORROTC,
+ OEXTEND:
+ Yyerror("racewalk: %v cannot exist now", Oconv(int(n.Op), 0))
+
+ goto ret
+
+ // just do generic traversal
+ case OFOR,
+ OIF,
+ OCALLMETH,
+ ORETURN,
+ ORETJMP,
+ OSWITCH,
+ OSELECT,
+ OEMPTY,
+ OBREAK,
+ OCONTINUE,
+ OFALL,
+ OGOTO,
+ OLABEL:
+ goto ret
+
+ // does not require instrumentation
+ case OPRINT, // don't bother instrumenting it
+ OPRINTN, // don't bother instrumenting it
+ OCHECKNIL, // always followed by a read.
+ OPARAM, // it appears only in fn->exit to copy heap params back
+ OCLOSUREVAR, // immutable pointer to captured variable
+ ODOTMETH, // either part of CALLMETH or CALLPART (lowered to PTRLIT)
+ OINDREG, // at this stage, only n(SP) nodes from nodarg
+ ODCL, // declarations (without value) cannot be races
+ ODCLCONST,
+ ODCLTYPE,
+ OTYPE,
+ ONONAME,
+ OLITERAL,
+ OSLICESTR, // always preceded by bounds checking, avoid double instrumentation.
+ OTYPESW: // ignored by code generation, do not instrument.
+ goto ret
+ }
+
+ret:
+ if n.Op != OBLOCK { // OBLOCK is handled above in a special way.
+ racewalklist(n.List, init)
+ }
+ if n.Ntest != nil {
+ racewalknode(&n.Ntest, &n.Ntest.Ninit, 0, 0)
+ }
+ if n.Nincr != nil {
+ racewalknode(&n.Nincr, &n.Nincr.Ninit, 0, 0)
+ }
+ racewalklist(n.Nbody, nil)
+ racewalklist(n.Nelse, nil)
+ racewalklist(n.Rlist, nil)
+ *np = n
+}
+
+func isartificial(n *Node) int {
+ // compiler-emitted artificial things that we do not want to instrument;
+ // they can't possibly participate in a data race.
+ if n.Op == ONAME && n.Sym != nil && n.Sym.Name != "" {
+ if n.Sym.Name == "_" {
+ return 1
+ }
+
+ // autotmp's are always local
+ if strings.HasPrefix(n.Sym.Name, "autotmp_") {
+ return 1
+ }
+
+ // statictmp's are read-only
+ if strings.HasPrefix(n.Sym.Name, "statictmp_") {
+ return 1
+ }
+
+ // go.itab is accessed only by the compiler and runtime (assume safe)
+ if n.Sym.Pkg != nil && n.Sym.Pkg.Name != "" && n.Sym.Pkg.Name == "go.itab" {
+ return 1
+ }
+ }
+
+ return 0
+}
+
+func callinstr(np **Node, init **NodeList, wr int, skip int) int {
+ var name string
+ var f *Node
+ var b *Node
+ var n *Node
+ var t *Type
+ var class int
+ var hascalls int
+
+ n = *np
+
+ //print("callinstr for %+N [ %O ] etype=%E class=%d\n",
+ // n, n->op, n->type ? n->type->etype : -1, n->class);
+
+ if skip != 0 || n.Type == nil || n.Type.Etype >= TIDEAL {
+ return 0
+ }
+ t = n.Type
+ if isartificial(n) != 0 {
+ return 0
+ }
+
+ b = outervalue(n)
+
+ // it skips e.g. stores to ... parameter array
+ if isartificial(b) != 0 {
+ return 0
+ }
+ class = int(b.Class)
+
+ // BUG: we _may_ want to instrument PAUTO sometimes
+ // e.g. if we've got a local variable/method receiver
+ // that has a pointer inside. Whether it points to
+ // the heap or not is impossible to know at compile time
+ if (class&PHEAP != 0) || class == PPARAMREF || class == PEXTERN || b.Op == OINDEX || b.Op == ODOTPTR || b.Op == OIND {
+ hascalls = 0
+ foreach(n, hascallspred, &hascalls)
+ if hascalls != 0 {
+ n = detachexpr(n, init)
+ *np = n
+ }
+
+ n = treecopy(n)
+ makeaddable(n)
+ if t.Etype == TSTRUCT || Isfixedarray(t) != 0 {
+ name = "racereadrange"
+ if wr != 0 {
+ name = "racewriterange"
+ }
+ f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(t.Width))
+ } else {
+ name = "raceread"
+ if wr != 0 {
+ name = "racewrite"
+ }
+ f = mkcall(name, nil, init, uintptraddr(n))
+ }
+
+ *init = list(*init, f)
+ return 1
+ }
+
+ return 0
+}
+
+// makeaddable returns a node whose memory location is the
+// same as n, but which is addressable in the Go language
+// sense.
+// This is different from functions like cheapexpr that may make
+// a copy of their argument.
+func makeaddable(n *Node) {
+ // The arguments to uintptraddr technically have an address but
+ // may not be addressable in the Go sense: for example, in the case
+ // of T(v).Field where T is a struct type and v is
+ // an addressable value.
+ switch n.Op {
+ case OINDEX:
+ if Isfixedarray(n.Left.Type) != 0 {
+ makeaddable(n.Left)
+ }
+
+ // Turn T(v).Field into v.Field
+ case ODOT,
+ OXDOT:
+ if n.Left.Op == OCONVNOP {
+ n.Left = n.Left.Left
+ }
+ makeaddable(n.Left)
+
+ // nothing to do
+ case ODOTPTR:
+ fallthrough
+ default:
+ break
+ }
+}
+
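+// uintptraddr returns the expression uintptr(unsafe.Pointer(&n)),
+// the form the race runtime entry points above expect for an address.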
+func uintptraddr(n *Node) *Node {
+ var r *Node
+
+ r = Nod(OADDR, n, nil)
+ r.Bounded = 1
+ r = conv(r, Types[TUNSAFEPTR])
+ r = conv(r, Types[TUINTPTR])
+ return r
+}
+
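+// detachexpr evaluates &n once into a temporary pointer and returns
+// the expression *tmp, giving instrumentation a way to refer to n's
+// memory without re-running any calls embedded in n.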
+func detachexpr(n *Node, init **NodeList) *Node {
+ var addr *Node
+ var as *Node
+ var ind *Node
+ var l *Node
+
+ addr = Nod(OADDR, n, nil)
+ l = temp(Ptrto(n.Type))
+ as = Nod(OAS, l, addr)
+ typecheck(&as, Etop)
+ walkexpr(&as, init)
+ *init = list(*init, as)
+ ind = Nod(OIND, l, nil)
+ typecheck(&ind, Erv)
+ walkexpr(&ind, init)
+ return ind
+}
+
+func foreachnode(n *Node, f func(*Node, interface{}), c interface{}) {
+ if n != nil {
+ f(n, c)
+ }
+}
+
+func foreachlist(l *NodeList, f func(*Node, interface{}), c interface{}) {
+ for ; l != nil; l = l.Next {
+ foreachnode(l.N, f, c)
+ }
+}
+
+func foreach(n *Node, f func(*Node, interface{}), c interface{}) {
+ foreachlist(n.Ninit, f, c)
+ foreachnode(n.Left, f, c)
+ foreachnode(n.Right, f, c)
+ foreachlist(n.List, f, c)
+ foreachnode(n.Ntest, f, c)
+ foreachnode(n.Nincr, f, c)
+ foreachlist(n.Nbody, f, c)
+ foreachlist(n.Nelse, f, c)
+ foreachlist(n.Rlist, f, c)
+}
+
+func hascallspred(n *Node, c interface{}) {
+ switch n.Op {
+ case OCALL,
+ OCALLFUNC,
+ OCALLMETH,
+ OCALLINTER:
+ (*c.(*int))++
+ }
+}
+
+// appendinit is like addinit in subr.go
+// but appends rather than prepends.
+func appendinit(np **Node, init *NodeList) {
+ var n *Node
+
+ if init == nil {
+ return
+ }
+
+ n = *np
+ switch n.Op {
+ // There may be multiple refs to this node;
+ // introduce OCONVNOP to hold init list.
+ case ONAME,
+ OLITERAL:
+ n = Nod(OCONVNOP, n, nil)
+
+ n.Type = n.Left.Type
+ n.Typecheck = 1
+ *np = n
+ }
+
+ n.Ninit = concat(n.Ninit, init)
+ n.Ullman = UINF
+}
diff --git a/src/cmd/internal/gc/range.go b/src/cmd/internal/gc/range.go
new file mode 100644
index 0000000000..1e33da385e
--- /dev/null
+++ b/src/cmd/internal/gc/range.go
@@ -0,0 +1,426 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+/*
+ * range
+ */
+func typecheckrange(n *Node) {
+ var toomany int
+ var why string
+ var t *Type
+ var t1 *Type
+ var t2 *Type
+ var v1 *Node
+ var v2 *Node
+ var ll *NodeList
+
+ // Typechecking order is important here:
+ // 0. first typecheck range expression (slice/map/chan),
+ // it is evaluated only once and so logically it is not part of the loop.
+ // 1. typecheck produced values,
+ // this part can declare new vars and so it must be typechecked before the body,
+ // because the body can contain a closure that captures the vars.
+ // 2. decldepth++ to denote loop body.
+ // 3. typecheck body.
+ // 4. decldepth--.
+
+ typecheck(&n.Right, Erv)
+
+ t = n.Right.Type
+ if t == nil {
+ goto out
+ }
+
+ // delicate little dance. see typecheckas2
+ for ll = n.List; ll != nil; ll = ll.Next {
+ if ll.N.Defn != n {
+ typecheck(&ll.N, Erv|Easgn)
+ }
+ }
+
+ if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) != 0 {
+ t = t.Type
+ }
+ n.Type = t
+
+ toomany = 0
+ switch t.Etype {
+ default:
+ Yyerror("cannot range over %v", Nconv(n.Right, obj.FmtLong))
+ goto out
+
+ case TARRAY:
+ t1 = Types[TINT]
+ t2 = t.Type
+
+ case TMAP:
+ t1 = t.Down
+ t2 = t.Type
+
+ case TCHAN:
+ if !(t.Chan&Crecv != 0) {
+ Yyerror("invalid operation: range %v (receive from send-only type %v)", Nconv(n.Right, 0), Tconv(n.Right.Type, 0))
+ goto out
+ }
+
+ t1 = t.Type
+ t2 = nil
+ if count(n.List) == 2 {
+ toomany = 1
+ }
+
+ case TSTRING:
+ t1 = Types[TINT]
+ t2 = runetype
+ }
+
+ if count(n.List) > 2 || toomany != 0 {
+ Yyerror("too many variables in range")
+ }
+
+ v1 = nil
+ if n.List != nil {
+ v1 = n.List.N
+ }
+ v2 = nil
+ if n.List != nil && n.List.Next != nil {
+ v2 = n.List.Next.N
+ }
+
+ // this is not only an optimization but also a requirement in the spec.
+ // "if the second iteration variable is the blank identifier, the range
+ // clause is equivalent to the same clause with only the first variable
+ // present."
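+ // For example, "for i, _ := range a" must behave exactly like
+ // "for i := range a".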
+ if isblank(v2) {
+ if v1 != nil {
+ n.List = list1(v1)
+ }
+ v2 = nil
+ }
+
+ if v1 != nil {
+ if v1.Defn == n {
+ v1.Type = t1
+ } else if v1.Type != nil && assignop(t1, v1.Type, &why) == 0 {
+ Yyerror("cannot assign type %v to %v in range%s", Tconv(t1, 0), Nconv(v1, obj.FmtLong), why)
+ }
+ checkassign(n, v1)
+ }
+
+ if v2 != nil {
+ if v2.Defn == n {
+ v2.Type = t2
+ } else if v2.Type != nil && assignop(t2, v2.Type, &why) == 0 {
+ Yyerror("cannot assign type %v to %v in range%s", Tconv(t2, 0), Nconv(v2, obj.FmtLong), why)
+ }
+ checkassign(n, v2)
+ }
+
+ // second half of dance
+out:
+ n.Typecheck = 1
+
+ for ll = n.List; ll != nil; ll = ll.Next {
+ if ll.N.Typecheck == 0 {
+ typecheck(&ll.N, Erv|Easgn)
+ }
+ }
+
+ decldepth++
+ typechecklist(n.Nbody, Etop)
+ decldepth--
+}
+
+func walkrange(n *Node) {
+ var ohv1 *Node // hidden (old) val 1, 2
+ var hv1 *Node
+ var hv2 *Node
+ var ha *Node // hidden aggregate, iterator
+ var hit *Node
+ var hn *Node // hidden len, pointer
+ var hp *Node
+ var hb *Node // hidden bool
+ var a *Node // not hidden aggregate, val 1, 2
+ var v1 *Node
+ var v2 *Node
+ var fn *Node
+ var tmp *Node
+ var keyname *Node
+ var valname *Node
+ var key *Node
+ var val *Node
+ var body *NodeList
+ var init *NodeList
+ var th *Type
+ var t *Type
+ var lno int
+
+ t = n.Type
+ init = nil
+
+ a = n.Right
+ lno = int(setlineno(a))
+
+ v1 = nil
+ if n.List != nil {
+ v1 = n.List.N
+ }
+ v2 = nil
+ if n.List != nil && n.List.Next != nil && !isblank(n.List.Next.N) {
+ v2 = n.List.Next.N
+ }
+
+ // n->list has no meaning anymore, clear it
+ // to avoid erroneous processing by racewalk.
+ n.List = nil
+
+ hv2 = nil
+
+ switch t.Etype {
+ default:
+ Fatal("walkrange")
+ fallthrough
+
+ // Lower n into runtime·memclr if possible, for
+ // fast zeroing of slices and arrays (issue 5373).
+ // Look for instances of
+ //
+ // for i := range a {
+ // a[i] = zero
+ // }
+ //
+ // in which the evaluation of a is side-effect-free.
+ case TARRAY:
+ if Debug['N'] == 0 && flag_race == 0 && v1 != nil && v2 == nil && n.Nbody != nil && n.Nbody.N != nil && n.Nbody.Next == nil { // exactly one statement in body
+ tmp = n.Nbody.N // first statement of body
+ if tmp.Op == OAS && tmp.Left.Op == OINDEX && samesafeexpr(tmp.Left.Left, a) != 0 && samesafeexpr(tmp.Left.Right, v1) != 0 && t.Type.Width > 0 && iszero(tmp.Right) != 0 {
+ // Convert to
+ // if len(a) != 0 {
+ // hp = &a[0]
+ // hn = len(a)*sizeof(elem(a))
+ // memclr(hp, hn)
+ // i = len(a) - 1
+ // }
+ n.Op = OIF
+
+ n.Nbody = nil
+ n.Ntest = Nod(ONE, Nod(OLEN, a, nil), Nodintconst(0))
+ n.Nincr = nil
+
+ // hp = &a[0]
+ hp = temp(Ptrto(Types[TUINT8]))
+
+ tmp = Nod(OINDEX, a, Nodintconst(0))
+ tmp.Bounded = 1
+ tmp = Nod(OADDR, tmp, nil)
+ tmp = Nod(OCONVNOP, tmp, nil)
+ tmp.Type = Ptrto(Types[TUINT8])
+ n.Nbody = list(n.Nbody, Nod(OAS, hp, tmp))
+
+ // hn = len(a) * sizeof(elem(a))
+ hn = temp(Types[TUINTPTR])
+
+ tmp = Nod(OLEN, a, nil)
+ tmp = Nod(OMUL, tmp, Nodintconst(t.Type.Width))
+ tmp = conv(tmp, Types[TUINTPTR])
+ n.Nbody = list(n.Nbody, Nod(OAS, hn, tmp))
+
+ // memclr(hp, hn)
+ fn = mkcall("memclr", nil, nil, hp, hn)
+
+ n.Nbody = list(n.Nbody, fn)
+
+ // i = len(a) - 1
+ v1 = Nod(OAS, v1, Nod(OSUB, Nod(OLEN, a, nil), Nodintconst(1)))
+
+ n.Nbody = list(n.Nbody, v1)
+
+ typecheck(&n.Ntest, Erv)
+ typechecklist(n.Nbody, Etop)
+ walkstmt(&n)
+ lineno = int32(lno)
+ return
+ }
+ }
+
+ // orderstmt arranged for a copy of the array/slice variable if needed.
+ ha = a
+
+ hv1 = temp(Types[TINT])
+ hn = temp(Types[TINT])
+ hp = nil
+
+ init = list(init, Nod(OAS, hv1, nil))
+ init = list(init, Nod(OAS, hn, Nod(OLEN, ha, nil)))
+ if v2 != nil {
+ hp = temp(Ptrto(n.Type.Type))
+ tmp = Nod(OINDEX, ha, Nodintconst(0))
+ tmp.Bounded = 1
+ init = list(init, Nod(OAS, hp, Nod(OADDR, tmp, nil)))
+ }
+
+ n.Ntest = Nod(OLT, hv1, hn)
+ n.Nincr = Nod(OAS, hv1, Nod(OADD, hv1, Nodintconst(1)))
+ if v1 == nil {
+ body = nil
+ } else if v2 == nil {
+ body = list1(Nod(OAS, v1, hv1))
+ } else {
+ a = Nod(OAS2, nil, nil)
+ a.List = list(list1(v1), v2)
+ a.Rlist = list(list1(hv1), Nod(OIND, hp, nil))
+ body = list1(a)
+
+ // Advance pointer as part of increment.
+ // We used to advance the pointer before executing the loop body,
+ // but doing so would make the pointer point past the end of the
+ // array during the final iteration, possibly causing another unrelated
+ // piece of memory not to be garbage collected until the loop finished.
+ // Advancing during the increment ensures that the pointer p only points
+ // past the end of the array during the final "p++; i++; if(i >= len(x)) break;",
+ // after which p is dead, so it cannot confuse the collector.
+ tmp = Nod(OADD, hp, Nodintconst(t.Type.Width))
+
+ tmp.Type = hp.Type
+ tmp.Typecheck = 1
+ tmp.Right.Type = Types[Tptr]
+ tmp.Right.Typecheck = 1
+ a = Nod(OAS, hp, tmp)
+ typecheck(&a, Etop)
+ n.Nincr.Ninit = list1(a)
+ }
+
+ // orderstmt allocated the iterator for us.
+ // the aggregate a is used only once, so no copy is needed.
+ case TMAP:
+ ha = a
+
+ th = hiter(t)
+ hit = n.Alloc
+ hit.Type = th
+ n.Left = nil
+ keyname = newname(th.Type.Sym) // depends on layout of iterator struct. See reflect.go:hiter
+ valname = newname(th.Type.Down.Sym) // ditto
+
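+ // The rewritten loop is, in effect (a sketch in source form):
+ //
+ //	mapiterinit(maptype, ha, &hit)
+ //	for ; hit.key != nil; mapiternext(&hit) {
+ //		v1, v2 = *hit.key, *hit.val
+ //	}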
+ fn = syslook("mapiterinit", 1)
+
+ argtype(fn, t.Down)
+ argtype(fn, t.Type)
+ argtype(fn, th)
+ init = list(init, mkcall1(fn, nil, nil, typename(t), ha, Nod(OADDR, hit, nil)))
+ n.Ntest = Nod(ONE, Nod(ODOT, hit, keyname), nodnil())
+
+ fn = syslook("mapiternext", 1)
+ argtype(fn, th)
+ n.Nincr = mkcall1(fn, nil, nil, Nod(OADDR, hit, nil))
+
+ key = Nod(ODOT, hit, keyname)
+ key = Nod(OIND, key, nil)
+ if v1 == nil {
+ body = nil
+ } else if v2 == nil {
+ body = list1(Nod(OAS, v1, key))
+ } else {
+ val = Nod(ODOT, hit, valname)
+ val = Nod(OIND, val, nil)
+ a = Nod(OAS2, nil, nil)
+ a.List = list(list1(v1), v2)
+ a.Rlist = list(list1(key), val)
+ body = list1(a)
+ }
+
+ // orderstmt arranged for a copy of the channel variable.
+ case TCHAN:
+ ha = a
+
+ n.Ntest = nil
+
+ hv1 = temp(t.Type)
+ hv1.Typecheck = 1
+ if haspointers(t.Type) {
+ init = list(init, Nod(OAS, hv1, nil))
+ }
+ hb = temp(Types[TBOOL])
+
+ n.Ntest = Nod(ONE, hb, Nodbool(0))
+ a = Nod(OAS2RECV, nil, nil)
+ a.Typecheck = 1
+ a.List = list(list1(hv1), hb)
+ a.Rlist = list1(Nod(ORECV, ha, nil))
+ n.Ntest.Ninit = list1(a)
+ if v1 == nil {
+ body = nil
+ } else {
+ body = list1(Nod(OAS, v1, hv1))
+ }
+
+ // orderstmt arranged for a copy of the string variable.
+ case TSTRING:
+ ha = a
+
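+ // stringiter(ha, hv1) returns the byte offset of the rune after the
+ // one starting at hv1, or 0 when the string is exhausted; ohv1 saves
+ // the current offset for assignment to v1, and the stringiter2 form
+ // also returns the decoded rune in hv2.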
+ ohv1 = temp(Types[TINT])
+
+ hv1 = temp(Types[TINT])
+ init = list(init, Nod(OAS, hv1, nil))
+
+ if v2 == nil {
+ a = Nod(OAS, hv1, mkcall("stringiter", Types[TINT], nil, ha, hv1))
+ } else {
+ hv2 = temp(runetype)
+ a = Nod(OAS2, nil, nil)
+ a.List = list(list1(hv1), hv2)
+ fn = syslook("stringiter2", 0)
+ a.Rlist = list1(mkcall1(fn, getoutargx(fn.Type), nil, ha, hv1))
+ }
+
+ n.Ntest = Nod(ONE, hv1, Nodintconst(0))
+ n.Ntest.Ninit = list(list1(Nod(OAS, ohv1, hv1)), a)
+
+ body = nil
+ if v1 != nil {
+ body = list1(Nod(OAS, v1, ohv1))
+ }
+ if v2 != nil {
+ body = list(body, Nod(OAS, v2, hv2))
+ }
+ }
+
+ n.Op = OFOR
+ typechecklist(init, Etop)
+ n.Ninit = concat(n.Ninit, init)
+ typechecklist(n.Ntest.Ninit, Etop)
+ typecheck(&n.Ntest, Erv)
+ typecheck(&n.Nincr, Etop)
+ typechecklist(body, Etop)
+ n.Nbody = concat(body, n.Nbody)
+ walkstmt(&n)
+
+ lineno = int32(lno)
+}
diff --git a/src/cmd/internal/gc/reflect.go b/src/cmd/internal/gc/reflect.go
new file mode 100644
index 0000000000..4be0f1d2f9
--- /dev/null
+++ b/src/cmd/internal/gc/reflect.go
@@ -0,0 +1,1746 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+/*
+ * runtime interface and reflection data structures
+ */
+var signatlist *NodeList
+
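+// sigcmp orders method signatures by name and then, for unexported
+// methods, by package path; a nil package (an exported method) sorts first.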
+func sigcmp(a *Sig, b *Sig) int {
+ var i int
+
+ i = stringsCompare(a.name, b.name)
+ if i != 0 {
+ return i
+ }
+ if a.pkg == b.pkg {
+ return 0
+ }
+ if a.pkg == nil {
+ return -1
+ }
+ if b.pkg == nil {
+ return +1
+ }
+ return stringsCompare(a.pkg.Path.S, b.pkg.Path.S)
+}
+
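+// lsort sorts the linked Sig list l by the comparison f using merge
+// sort: the double-step l2 / single-step l1 walk below finds the
+// midpoint, the halves are sorted recursively, then merged.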
+func lsort(l *Sig, f func(*Sig, *Sig) int) *Sig {
+ var l1 *Sig
+ var l2 *Sig
+ var le *Sig
+
+ if l == nil || l.link == nil {
+ return l
+ }
+
+ l1 = l
+ l2 = l
+ for {
+ l2 = l2.link
+ if l2 == nil {
+ break
+ }
+ l2 = l2.link
+ if l2 == nil {
+ break
+ }
+ l1 = l1.link
+ }
+
+ l2 = l1.link
+ l1.link = nil
+ l1 = lsort(l, f)
+ l2 = lsort(l2, f)
+
+ /* set up lead element */
+ if f(l1, l2) < 0 {
+ l = l1
+ l1 = l1.link
+ } else {
+ l = l2
+ l2 = l2.link
+ }
+
+ le = l
+
+ for {
+ if l1 == nil {
+ for l2 != nil {
+ le.link = l2
+ le = l2
+ l2 = l2.link
+ }
+
+ le.link = nil
+ break
+ }
+
+ if l2 == nil {
+ for l1 != nil {
+ le.link = l1
+ le = l1
+ l1 = l1.link
+ }
+
+ break
+ }
+
+ if f(l1, l2) < 0 {
+ le.link = l1
+ le = l1
+ l1 = l1.link
+ } else {
+ le.link = l2
+ le = l2
+ l2 = l2.link
+ }
+ }
+
+ le.link = nil
+ return l
+}
+
+// Builds a type representing a Bucket structure for
+// the given map type. This type is not visible to users -
+// we include only enough information to generate a correct GC
+// program for it.
+// Make sure this stays in sync with ../../runtime/hashmap.go!
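+//
+// For a map[K]V the bucket built below is, in effect:
+//
+//	struct {
+//		topbits  [BUCKETSIZE]uint8
+//		keys     [BUCKETSIZE]K // *K when K is wider than MAXKEYSIZE
+//		values   [BUCKETSIZE]V // *V when V is wider than MAXVALSIZE
+//		overflow *bucket
+//	}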
+const (
+ BUCKETSIZE = 8
+ MAXKEYSIZE = 128
+ MAXVALSIZE = 128
+)
+
+func makefield(name string, t *Type) *Type {
+ var f *Type
+
+ f = typ(TFIELD)
+ f.Type = t
+ f.Sym = new(Sym)
+ f.Sym.Name = name
+ return f
+}
+
+func mapbucket(t *Type) *Type {
+ var keytype *Type
+ var valtype *Type
+ var bucket *Type
+ var arr *Type
+ var field [4]*Type
+ var n int32
+
+ if t.Bucket != nil {
+ return t.Bucket
+ }
+
+ bucket = typ(TSTRUCT)
+ keytype = t.Down
+ valtype = t.Type
+ dowidth(keytype)
+ dowidth(valtype)
+ if keytype.Width > MAXKEYSIZE {
+ keytype = Ptrto(keytype)
+ }
+ if valtype.Width > MAXVALSIZE {
+ valtype = Ptrto(valtype)
+ }
+
+ // The first field is: uint8 topbits[BUCKETSIZE].
+ arr = typ(TARRAY)
+
+ arr.Type = Types[TUINT8]
+ arr.Bound = BUCKETSIZE
+ field[0] = makefield("topbits", arr)
+ arr = typ(TARRAY)
+ arr.Type = keytype
+ arr.Bound = BUCKETSIZE
+ field[1] = makefield("keys", arr)
+ arr = typ(TARRAY)
+ arr.Type = valtype
+ arr.Bound = BUCKETSIZE
+ field[2] = makefield("values", arr)
+ field[3] = makefield("overflow", Ptrto(bucket))
+
+ // link up fields
+ bucket.Noalg = 1
+
+ bucket.Local = t.Local
+ bucket.Type = field[0]
+ for n = 0; n < int32(len(field)-1); n++ {
+ field[n].Down = field[n+1]
+ }
+ field[len(field)-1].Down = nil
+ dowidth(bucket)
+
+ // Pad to the native integer alignment.
+ // This is usually the same as widthptr; the exception (as usual) is amd64p32.
+ if Widthreg > Widthptr {
+ bucket.Width += int64(Widthreg) - int64(Widthptr)
+ }
+
+ // See comment on hmap.overflow in ../../runtime/hashmap.go.
+ if !haspointers(t.Type) && !haspointers(t.Down) {
+ bucket.Haspointers = 1 // no pointers
+ }
+
+ t.Bucket = bucket
+
+ bucket.Map = t
+ return bucket
+}
+
+// Builds a type representing a Hmap structure for the given map type.
+// Make sure this stays in sync with ../../runtime/hashmap.go!
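+//
+// The struct built below is, in effect:
+//
+//	struct {
+//		count      int
+//		flags      uint8
+//		B          uint8
+//		hash0      uint32
+//		buckets    *bucket
+//		oldbuckets *bucket
+//		nevacuate  uintptr
+//		overflow   unsafe.Pointer
+//	}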
+func hmap(t *Type) *Type {
+ var h *Type
+ var bucket *Type
+ var field [8]*Type
+ var n int32
+
+ if t.Hmap != nil {
+ return t.Hmap
+ }
+
+ bucket = mapbucket(t)
+ field[0] = makefield("count", Types[TINT])
+ field[1] = makefield("flags", Types[TUINT8])
+ field[2] = makefield("B", Types[TUINT8])
+ field[3] = makefield("hash0", Types[TUINT32])
+ field[4] = makefield("buckets", Ptrto(bucket))
+ field[5] = makefield("oldbuckets", Ptrto(bucket))
+ field[6] = makefield("nevacuate", Types[TUINTPTR])
+ field[7] = makefield("overflow", Types[TUNSAFEPTR])
+
+ h = typ(TSTRUCT)
+ h.Noalg = 1
+ h.Local = t.Local
+ h.Type = field[0]
+ for n = 0; n < int32(len(field)-1); n++ {
+ field[n].Down = field[n+1]
+ }
+ field[len(field)-1].Down = nil
+ dowidth(h)
+ t.Hmap = h
+ h.Map = t
+ return h
+}
+
+func hiter(t *Type) *Type {
+ var n int32
+ var field [12]*Type
+ var i *Type
+
+ if t.Hiter != nil {
+ return t.Hiter
+ }
+
+ // build a struct:
+ // hash_iter {
+ // key *Key
+ // val *Value
+ // t *MapType
+ // h *Hmap
+ // buckets *Bucket
+ // bptr *Bucket
+ // overflow0 unsafe.Pointer
+ // overflow1 unsafe.Pointer
+ // startBucket uintptr
+ // stuff uintptr
+ // bucket uintptr
+ // checkBucket uintptr
+ // }
+ // must match ../../runtime/hashmap.go:hash_iter.
+ field[0] = makefield("key", Ptrto(t.Down))
+
+ field[1] = makefield("val", Ptrto(t.Type))
+ field[2] = makefield("t", Ptrto(Types[TUINT8]))
+ field[3] = makefield("h", Ptrto(hmap(t)))
+ field[4] = makefield("buckets", Ptrto(mapbucket(t)))
+ field[5] = makefield("bptr", Ptrto(mapbucket(t)))
+ field[6] = makefield("overflow0", Types[TUNSAFEPTR])
+ field[7] = makefield("overflow1", Types[TUNSAFEPTR])
+ field[8] = makefield("startBucket", Types[TUINTPTR])
+ field[9] = makefield("stuff", Types[TUINTPTR]) // offset+wrapped+B+I
+ field[10] = makefield("bucket", Types[TUINTPTR])
+ field[11] = makefield("checkBucket", Types[TUINTPTR])
+
+ // build iterator struct holding the above fields
+ i = typ(TSTRUCT)
+
+ i.Noalg = 1
+ i.Type = field[0]
+ for n = 0; n < int32(len(field)-1); n++ {
+ field[n].Down = field[n+1]
+ }
+ field[len(field)-1].Down = nil
+ dowidth(i)
+ if i.Width != int64(12*Widthptr) {
+ Yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
+ }
+ t.Hiter = i
+ i.Map = t
+ return i
+}
+
+/*
+ * f is method type, with receiver.
+ * return function type, receiver as first argument (or not).
+ */
+func methodfunc(f *Type, receiver *Type) *Type {
+ var in *NodeList
+ var out *NodeList
+ var d *Node
+ var t *Type
+
+ in = nil
+ if receiver != nil {
+ d = Nod(ODCLFIELD, nil, nil)
+ d.Type = receiver
+ in = list(in, d)
+ }
+
+ for t = getinargx(f).Type; t != nil; t = t.Down {
+ d = Nod(ODCLFIELD, nil, nil)
+ d.Type = t.Type
+ d.Isddd = t.Isddd
+ in = list(in, d)
+ }
+
+ out = nil
+ for t = getoutargx(f).Type; t != nil; t = t.Down {
+ d = Nod(ODCLFIELD, nil, nil)
+ d.Type = t.Type
+ out = list(out, d)
+ }
+
+ t = functype(nil, in, out)
+ if f.Nname != nil {
+ // Link to name of original method function.
+ t.Nname = f.Nname
+ }
+
+ return t
+}
+
+/*
+ * return methods of non-interface type t, sorted by name.
+ * generates stub functions as needed.
+ */
+func methods(t *Type) *Sig {
+ var f *Type
+ var mt *Type
+ var it *Type
+ var this *Type
+ var a *Sig
+ var b *Sig
+ var method *Sym
+
+ // method type
+ mt = methtype(t, 0)
+
+ if mt == nil {
+ return nil
+ }
+ expandmeth(mt)
+
+ // type stored in interface word
+ it = t
+
+ if !(isdirectiface(it) != 0) {
+ it = Ptrto(t)
+ }
+
+ // make list of methods for t,
+ // generating code if necessary.
+ a = nil
+
+ for f = mt.Xmethod; f != nil; f = f.Down {
+ if f.Etype != TFIELD {
+ Fatal("methods: not field %v", Tconv(f, 0))
+ }
+ if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 {
+ Fatal("non-method on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
+ }
+ if !(getthisx(f.Type).Type != nil) {
+ Fatal("receiver with no type on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
+ }
+ if f.Nointerface != 0 {
+ continue
+ }
+
+ method = f.Sym
+ if method == nil {
+ continue
+ }
+
+ // get receiver type for this particular method.
+ // if pointer receiver but non-pointer t and
+ // this is not an embedded pointer inside a struct,
+ // method does not apply.
+ this = getthisx(f.Type).Type.Type
+
+ if Isptr[this.Etype] != 0 && this.Type == t {
+ continue
+ }
+ if Isptr[this.Etype] != 0 && !(Isptr[t.Etype] != 0) && f.Embedded != 2 && !(isifacemethod(f.Type) != 0) {
+ continue
+ }
+
+ b = new(Sig)
+ b.link = a
+ a = b
+
+ a.name = method.Name
+ if !exportname(method.Name) {
+ if method.Pkg == nil {
+ Fatal("methods: missing package")
+ }
+ a.pkg = method.Pkg
+ }
+
+ a.isym = methodsym(method, it, 1)
+ a.tsym = methodsym(method, t, 0)
+ a.type_ = methodfunc(f.Type, t)
+ a.mtype = methodfunc(f.Type, nil)
+
+ if !(a.isym.Flags&SymSiggen != 0) {
+ a.isym.Flags |= SymSiggen
+ if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
+ compiling_wrappers = 1
+ genwrapper(it, f, a.isym, 1)
+ compiling_wrappers = 0
+ }
+ }
+
+ if !(a.tsym.Flags&SymSiggen != 0) {
+ a.tsym.Flags |= SymSiggen
+ if !Eqtype(this, t) {
+ compiling_wrappers = 1
+ genwrapper(t, f, a.tsym, 0)
+ compiling_wrappers = 0
+ }
+ }
+ }
+
+ return lsort(a, sigcmp)
+}
+
+/*
+ * return methods of interface type t, sorted by name.
+ */
+func imethods(t *Type) *Sig {
+ var a *Sig
+ var all *Sig
+ var last *Sig
+ var f *Type
+ var method *Sym
+ var isym *Sym
+
+ all = nil
+ last = nil
+ for f = t.Type; f != nil; f = f.Down {
+ if f.Etype != TFIELD {
+ Fatal("imethods: not field")
+ }
+ if f.Type.Etype != TFUNC || f.Sym == nil {
+ continue
+ }
+ method = f.Sym
+ a = new(Sig)
+ a.name = method.Name
+ if !exportname(method.Name) {
+ if method.Pkg == nil {
+ Fatal("imethods: missing package")
+ }
+ a.pkg = method.Pkg
+ }
+
+ a.mtype = f.Type
+ a.offset = 0
+ a.type_ = methodfunc(f.Type, nil)
+
+ if last != nil && sigcmp(last, a) >= 0 {
+ Fatal("sigcmp vs sortinter %s %s", last.name, a.name)
+ }
+ if last == nil {
+ all = a
+ } else {
+ last.link = a
+ }
+ last = a
+
+ // Compiler can only refer to wrappers for non-blank methods.
+ if isblanksym(method) {
+ continue
+ }
+
+ // NOTE(rsc): Perhaps an oversight that
+ // IfaceType.Method is not in the reflect data.
+ // Generate the method body, so that compiled
+ // code can refer to it.
+ isym = methodsym(method, t, 0)
+
+ if !(isym.Flags&SymSiggen != 0) {
+ isym.Flags |= SymSiggen
+ genwrapper(t, f, isym, 0)
+ }
+ }
+
+ return all
+}
+
+var dimportpath_gopkg *Pkg
+
+func dimportpath(p *Pkg) {
+ var nam string
+ var n *Node
+
+ if p.Pathsym != nil {
+ return
+ }
+
+ if dimportpath_gopkg == nil {
+ dimportpath_gopkg = mkpkg(newstrlit("go"))
+ dimportpath_gopkg.Name = "go"
+ }
+
+ nam = fmt.Sprintf("importpath.%s.", p.Prefix)
+
+ n = Nod(ONAME, nil, nil)
+ n.Sym = Pkglookup(nam, dimportpath_gopkg)
+
+ n.Class = PEXTERN
+ n.Xoffset = 0
+ p.Pathsym = n.Sym
+
+ gdatastring(n, p.Path)
+ ggloblsym(n.Sym, int32(Types[TSTRING].Width), obj.DUPOK|obj.RODATA)
+}
+
+func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
+ if pkg == nil {
+ return dgostringptr(s, ot, "")
+ }
+
+ // Emit reference to go.importpath.""., which 6l will
+ // rewrite using the correct import path. Every package
+ // that imports this one directly defines the symbol.
+ if pkg == localpkg {
+ ns := Pkglookup("importpath.\"\".", mkpkg(newstrlit("go")))
+ return dsymptr(s, ot, ns, 0)
+ }
+
+ dimportpath(pkg)
+ return dsymptr(s, ot, pkg.Pathsym, 0)
+}
+
+/*
+ * uncommonType
+ * ../../runtime/type.go:/uncommonType
+ */
+func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
+ var ot int
+ var n int
+ var s *Sym
+ var a *Sig
+ var m *Sig
+
+ m = methods(t)
+ if t.Sym == nil && m == nil {
+ return off
+ }
+
+ // fill in *extraType pointer in header
+ off = int(Rnd(int64(off), int64(Widthptr)))
+
+ dsymptr(sym, ptroff, sym, off)
+
+ n = 0
+ for a = m; a != nil; a = a.link {
+ dtypesym(a.type_)
+ n++
+ }
+
+ ot = off
+ s = sym
+ if t.Sym != nil {
+ ot = dgostringptr(s, ot, t.Sym.Name)
+ if t != Types[t.Etype] && t != errortype {
+ ot = dgopkgpath(s, ot, t.Sym.Pkg)
+ } else {
+ ot = dgostringptr(s, ot, "")
+ }
+ } else {
+ ot = dgostringptr(s, ot, "")
+ ot = dgostringptr(s, ot, "")
+ }
+
+ // slice header
+ ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ ot = duintxx(s, ot, uint64(n), Widthint)
+
+ // methods
+ for a = m; a != nil; a = a.link {
+ // method
+ // ../../runtime/type.go:/method
+ ot = dgostringptr(s, ot, a.name)
+
+ ot = dgopkgpath(s, ot, a.pkg)
+ ot = dsymptr(s, ot, dtypesym(a.mtype), 0)
+ ot = dsymptr(s, ot, dtypesym(a.type_), 0)
+ if a.isym != nil {
+ ot = dsymptr(s, ot, a.isym, 0)
+ } else {
+ ot = duintptr(s, ot, 0)
+ }
+ if a.tsym != nil {
+ ot = dsymptr(s, ot, a.tsym, 0)
+ } else {
+ ot = duintptr(s, ot, 0)
+ }
+ }
+
+ return ot
+}
+
+var kinds = []int{
+ TINT: obj.KindInt,
+ TUINT: obj.KindUint,
+ TINT8: obj.KindInt8,
+ TUINT8: obj.KindUint8,
+ TINT16: obj.KindInt16,
+ TUINT16: obj.KindUint16,
+ TINT32: obj.KindInt32,
+ TUINT32: obj.KindUint32,
+ TINT64: obj.KindInt64,
+ TUINT64: obj.KindUint64,
+ TUINTPTR: obj.KindUintptr,
+ TFLOAT32: obj.KindFloat32,
+ TFLOAT64: obj.KindFloat64,
+ TBOOL: obj.KindBool,
+ TSTRING: obj.KindString,
+ TPTR32: obj.KindPtr,
+ TPTR64: obj.KindPtr,
+ TSTRUCT: obj.KindStruct,
+ TINTER: obj.KindInterface,
+ TCHAN: obj.KindChan,
+ TMAP: obj.KindMap,
+ TARRAY: obj.KindArray,
+ TFUNC: obj.KindFunc,
+ TCOMPLEX64: obj.KindComplex64,
+ TCOMPLEX128: obj.KindComplex128,
+ TUNSAFEPTR: obj.KindUnsafePointer,
+}
+
+func haspointers(t *Type) bool {
+ var t1 *Type
+ var ret int
+
+ if t.Haspointers != 0 {
+ return t.Haspointers-1 != 0
+ }
+
+ switch t.Etype {
+ case TINT,
+ TUINT,
+ TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TUINTPTR,
+ TFLOAT32,
+ TFLOAT64,
+ TCOMPLEX64,
+ TCOMPLEX128,
+ TBOOL:
+ ret = 0
+
+ case TARRAY:
+ if t.Bound < 0 { // slice
+ ret = 1
+ break
+ }
+
+ if t.Bound == 0 { // empty array
+ ret = 0
+ break
+ }
+
+ ret = bool2int(haspointers(t.Type))
+
+ case TSTRUCT:
+ ret = 0
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ if haspointers(t1.Type) {
+ ret = 1
+ break
+ }
+ }
+
+ case TSTRING,
+ TPTR32,
+ TPTR64,
+ TUNSAFEPTR,
+ TINTER,
+ TCHAN,
+ TMAP,
+ TFUNC:
+ fallthrough
+ default:
+ ret = 1
+ }
+
+ t.Haspointers = uint8(1 + ret)
+ return ret != 0
+}
+
+/*
+ * commonType
+ * ../../runtime/type.go:/commonType
+ */
+
+var dcommontype_algarray *Sym
+
+func dcommontype(s *Sym, ot int, t *Type) int {
+ var i int
+ var alg int
+ var sizeofAlg int
+ var gcprog int
+ var sptr *Sym
+ var algsym *Sym
+ var zero *Sym
+ var gcprog0 *Sym
+ var gcprog1 *Sym
+ var sbits *Sym
+ var gcmask [16]uint8
+ var x1 uint64
+ var x2 uint64
+ var p string
+
+ if ot != 0 {
+ Fatal("dcommontype %d", ot)
+ }
+
+ sizeofAlg = 2 * Widthptr
+ if dcommontype_algarray == nil {
+ dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
+ }
+ dowidth(t)
+ alg = algtype(t)
+ algsym = nil
+ if alg < 0 || alg == AMEM {
+ algsym = dalgsym(t)
+ }
+
+ if t.Sym != nil && !(Isptr[t.Etype] != 0) {
+ sptr = dtypesym(Ptrto(t))
+ } else {
+ sptr = weaktypesym(Ptrto(t))
+ }
+
+ // All (non-reflect-allocated) Types share the same zero object.
+ // Each place in the compiler where a pointer to the zero object
+ // might be returned by a runtime call (map access return value,
+ // 2-arg type cast) declares the size of the zerovalue it needs.
+ // The linker magically takes the max of all the sizes.
+ zero = Pkglookup("zerovalue", Runtimepkg)
+
+ // We use size 0 here so we get the pointer to the zero value,
+ // but don't allocate space for the zero value unless we need it.
+ // TODO: how do we get this symbol into bss? We really want
+ // a read-only bss, but I don't think such a thing exists.
+
+ // ../../pkg/reflect/type.go:/^type.commonType
+ // actual type structure
+ // type commonType struct {
+ // size uintptr
+ // hash uint32
+ // _ uint8
+ // align uint8
+ // fieldAlign uint8
+ // kind uint8
+ // alg unsafe.Pointer
+ // gc unsafe.Pointer
+ // string *string
+ // *extraType
+ // ptrToThis *Type
+ // zero unsafe.Pointer
+ // }
+ ot = duintptr(s, ot, uint64(t.Width))
+
+ ot = duint32(s, ot, typehash(t))
+ ot = duint8(s, ot, 0) // unused
+
+ // runtime (and common sense) expects alignment to be a power of two.
+ i = int(t.Align)
+
+ if i == 0 {
+ i = 1
+ }
+ if i&(i-1) != 0 {
+ Fatal("invalid alignment %d for %v", t.Align, Tconv(t, 0))
+ }
+ ot = duint8(s, ot, t.Align) // align
+ ot = duint8(s, ot, t.Align) // fieldAlign
+
+ gcprog = usegcprog(t)
+
+ i = kinds[t.Etype]
+ if t.Etype == TARRAY && t.Bound < 0 {
+ i = obj.KindSlice
+ }
+ if !haspointers(t) {
+ i |= obj.KindNoPointers
+ }
+ if isdirectiface(t) != 0 {
+ i |= obj.KindDirectIface
+ }
+ if gcprog != 0 {
+ i |= obj.KindGCProg
+ }
+ ot = duint8(s, ot, uint8(i)) // kind
+ if algsym == nil {
+ ot = dsymptr(s, ot, dcommontype_algarray, alg*sizeofAlg)
+ } else {
+ ot = dsymptr(s, ot, algsym, 0)
+ }
+
+ // gc
+ if gcprog != 0 {
+ gengcprog(t, &gcprog0, &gcprog1)
+ if gcprog0 != nil {
+ ot = dsymptr(s, ot, gcprog0, 0)
+ } else {
+ ot = duintptr(s, ot, 0)
+ }
+ ot = dsymptr(s, ot, gcprog1, 0)
+ } else {
+ gengcmask(t, gcmask[:])
+ x1 = 0
+ for i = 0; i < 8; i++ {
+ x1 = x1<<8 | uint64(gcmask[i])
+ }
+ if Widthptr == 4 {
+ p = fmt.Sprintf("gcbits.0x%016x", x1)
+ } else {
+ x2 = 0
+ for i = 0; i < 8; i++ {
+ x2 = x2<<8 | uint64(gcmask[i+8])
+ }
+ p = fmt.Sprintf("gcbits.0x%016x%016x", x1, x2)
+ }
+
+ sbits = Pkglookup(p, Runtimepkg)
+ if sbits.Flags&SymUniq == 0 {
+ sbits.Flags |= SymUniq
+ for i = 0; i < 2*Widthptr; i++ {
+ duint8(sbits, i, gcmask[i])
+ }
+ ggloblsym(sbits, 2*int32(Widthptr), obj.DUPOK|obj.RODATA)
+ }
+
+ ot = dsymptr(s, ot, sbits, 0)
+ ot = duintptr(s, ot, 0)
+ }
+
+ p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned))
+
+ //print("dcommontype: %s\n", p);
+ ot = dgostringptr(s, ot, p) // string
+
+ // skip pointer to extraType,
+ // which follows the rest of this type structure.
+ // caller will fill in if needed.
+ // otherwise linker will assume 0.
+ ot += Widthptr
+
+ ot = dsymptr(s, ot, sptr, 0) // ptrto type
+ ot = dsymptr(s, ot, zero, 0) // ptr to zero value
+ return ot
+}
+
+func typesym(t *Type) *Sym {
+ var p string
+ var s *Sym
+
+ p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft))
+ s = Pkglookup(p, typepkg)
+
+ //print("typesym: %s -> %+S\n", p, s);
+
+ return s
+}
+
+func tracksym(t *Type) *Sym {
+ var p string
+ var s *Sym
+
+ p = fmt.Sprintf("%v.%s", Tconv(t.Outer, obj.FmtLeft), t.Sym.Name)
+ s = Pkglookup(p, trackpkg)
+
+ return s
+}
+
+func typelinksym(t *Type) *Sym {
+ var p string
+ var s *Sym
+
+ // %-uT is what the generated Type's string field says.
+ // It uses (ambiguous) package names instead of import paths.
+ // %-T is the complete, unambiguous type name.
+ // We want the types to end up sorted by string field,
+ // so use that first in the name, and then add :%-T to
+ // disambiguate. The names are a little long but they are
+ // discarded by the linker and do not end up in the symbol
+ // table of the final binary.
+ p = fmt.Sprintf("%v/%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned), Tconv(t, obj.FmtLeft))
+
+ s = Pkglookup(p, typelinkpkg)
+
+ //print("typelinksym: %s -> %+S\n", p, s);
+
+ return s
+}
+
+func typesymprefix(prefix string, t *Type) *Sym {
+ var p string
+ var s *Sym
+
+ p = fmt.Sprintf("%s.%v", prefix, Tconv(t, obj.FmtLeft))
+ s = Pkglookup(p, typepkg)
+
+ //print("algsym: %s -> %+S\n", p, s);
+
+ return s
+}
+
+func typenamesym(t *Type) *Sym {
+ var s *Sym
+ var n *Node
+
+ if t == nil || (Isptr[t.Etype] != 0 && t.Type == nil) || isideal(t) != 0 {
+ Fatal("typename %v", Tconv(t, 0))
+ }
+ s = typesym(t)
+ if s.Def == nil {
+ n = Nod(ONAME, nil, nil)
+ n.Sym = s
+ n.Type = Types[TUINT8]
+ n.Addable = 1
+ n.Ullman = 1
+ n.Class = PEXTERN
+ n.Xoffset = 0
+ n.Typecheck = 1
+ s.Def = n
+
+ signatlist = list(signatlist, typenod(t))
+ }
+
+ return s.Def.Sym
+}
+
+func typename(t *Type) *Node {
+ var s *Sym
+ var n *Node
+
+ s = typenamesym(t)
+ n = Nod(OADDR, s.Def, nil)
+ n.Type = Ptrto(s.Def.Type)
+ n.Addable = 1
+ n.Ullman = 2
+ n.Typecheck = 1
+ return n
+}
+
+func weaktypesym(t *Type) *Sym {
+ var p string
+ var s *Sym
+
+ p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft))
+ s = Pkglookup(p, weaktypepkg)
+
+ //print("weaktypesym: %s -> %+S\n", p, s);
+
+ return s
+}
+
+/*
+ * Returns 1 if t has a reflexive equality operator.
+ * That is, if x==x for all x of type t.
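+ * Floating-point types, for example, are not: NaN != NaN.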
+ */
+func isreflexive(t *Type) int {
+ var t1 *Type
+ switch t.Etype {
+ case TBOOL,
+ TINT,
+ TUINT,
+ TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TUINTPTR,
+ TPTR32,
+ TPTR64,
+ TUNSAFEPTR,
+ TSTRING,
+ TCHAN:
+ return 1
+
+ case TFLOAT32,
+ TFLOAT64,
+ TCOMPLEX64,
+ TCOMPLEX128,
+ TINTER:
+ return 0
+
+ case TARRAY:
+ if Isslice(t) != 0 {
+ Fatal("slice can't be a map key: %v", Tconv(t, 0))
+ }
+ return isreflexive(t.Type)
+
+ case TSTRUCT:
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ if !(isreflexive(t1.Type) != 0) {
+ return 0
+ }
+ }
+
+ return 1
+
+ default:
+ Fatal("bad type for map key: %v", Tconv(t, 0))
+ return 0
+ }
+}
+
+func dtypesym(t *Type) *Sym {
+ var ot int
+ var xt int
+ var n int
+ var isddd int
+ var dupok int
+ var s *Sym
+ var s1 *Sym
+ var s2 *Sym
+ var s3 *Sym
+ var s4 *Sym
+ var slink *Sym
+ var a *Sig
+ var m *Sig
+ var t1 *Type
+ var tbase *Type
+ var t2 *Type
+
+ // Replace byte, rune aliases with real type.
+ // They've been separate internally to make error messages
+ // better, but we have to merge them in the reflect tables.
+ if t == bytetype || t == runetype {
+ t = Types[t.Etype]
+ }
+
+ if isideal(t) != 0 {
+ Fatal("dtypesym %v", Tconv(t, 0))
+ }
+
+ s = typesym(t)
+ if s.Flags&SymSiggen != 0 {
+ return s
+ }
+ s.Flags |= SymSiggen
+
+ // special case (look for runtime below):
+ // when compiling package runtime,
+ // emit the type structures for int, float, etc.
+ tbase = t
+
+ if Isptr[t.Etype] != 0 && t.Sym == nil && t.Type.Sym != nil {
+ tbase = t.Type
+ }
+ dupok = 0
+ if tbase.Sym == nil {
+ dupok = obj.DUPOK
+ }
+
+ if compiling_runtime != 0 && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
+ goto ok
+ }
+
+ // named types from other files are defined only by those files
+ if tbase.Sym != nil && !(tbase.Local != 0) {
+ return s
+ }
+ if isforw[tbase.Etype] != 0 {
+ return s
+ }
+
+ok:
+ ot = 0
+ xt = 0
+ switch t.Etype {
+ default:
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+
+ case TARRAY:
+ if t.Bound >= 0 {
+ // ../../runtime/type.go:/ArrayType
+ s1 = dtypesym(t.Type)
+
+ t2 = typ(TARRAY)
+ t2.Type = t.Type
+ t2.Bound = -1 // slice
+ s2 = dtypesym(t2)
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s1, 0)
+ ot = dsymptr(s, ot, s2, 0)
+ ot = duintptr(s, ot, uint64(t.Bound))
+ } else {
+ // ../../runtime/type.go:/SliceType
+ s1 = dtypesym(t.Type)
+
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s1, 0)
+ }
+
+ // ../../runtime/type.go:/ChanType
+ case TCHAN:
+ s1 = dtypesym(t.Type)
+
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s1, 0)
+ ot = duintptr(s, ot, uint64(t.Chan))
+
+ case TFUNC:
+ for t1 = getthisx(t).Type; t1 != nil; t1 = t1.Down {
+ dtypesym(t1.Type)
+ }
+ isddd = 0
+ for t1 = getinargx(t).Type; t1 != nil; t1 = t1.Down {
+ isddd = int(t1.Isddd)
+ dtypesym(t1.Type)
+ }
+
+ for t1 = getoutargx(t).Type; t1 != nil; t1 = t1.Down {
+ dtypesym(t1.Type)
+ }
+
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = duint8(s, ot, uint8(isddd))
+
+ // two slice headers: in and out.
+ ot = int(Rnd(int64(ot), int64(Widthptr)))
+
+ ot = dsymptr(s, ot, s, ot+2*(Widthptr+2*Widthint))
+ n = t.Thistuple + t.Intuple
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ ot = dsymptr(s, ot, s, ot+1*(Widthptr+2*Widthint)+n*Widthptr)
+ ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
+ ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
+
+ // slice data
+ for t1 = getthisx(t).Type; t1 != nil; (func() { t1 = t1.Down; n++ })() {
+ ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+ }
+ for t1 = getinargx(t).Type; t1 != nil; (func() { t1 = t1.Down; n++ })() {
+ ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+ }
+ for t1 = getoutargx(t).Type; t1 != nil; (func() { t1 = t1.Down; n++ })() {
+ ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+ }
+
+ case TINTER:
+ m = imethods(t)
+ n = 0
+ for a = m; a != nil; a = a.link {
+ dtypesym(a.type_)
+ n++
+ }
+
+ // ../../runtime/type.go:/InterfaceType
+ ot = dcommontype(s, ot, t)
+
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ for a = m; a != nil; a = a.link {
+ // ../../runtime/type.go:/imethod
+ ot = dgostringptr(s, ot, a.name)
+
+ ot = dgopkgpath(s, ot, a.pkg)
+ ot = dsymptr(s, ot, dtypesym(a.type_), 0)
+ }
+
+ // ../../runtime/type.go:/MapType
+ case TMAP:
+ s1 = dtypesym(t.Down)
+
+ s2 = dtypesym(t.Type)
+ s3 = dtypesym(mapbucket(t))
+ s4 = dtypesym(hmap(t))
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s1, 0)
+ ot = dsymptr(s, ot, s2, 0)
+ ot = dsymptr(s, ot, s3, 0)
+ ot = dsymptr(s, ot, s4, 0)
+ if t.Down.Width > MAXKEYSIZE {
+ ot = duint8(s, ot, uint8(Widthptr))
+ ot = duint8(s, ot, 1) // indirect
+ } else {
+ ot = duint8(s, ot, uint8(t.Down.Width))
+ ot = duint8(s, ot, 0) // not indirect
+ }
+
+ if t.Type.Width > MAXVALSIZE {
+ ot = duint8(s, ot, uint8(Widthptr))
+ ot = duint8(s, ot, 1) // indirect
+ } else {
+ ot = duint8(s, ot, uint8(t.Type.Width))
+ ot = duint8(s, ot, 0) // not indirect
+ }
+
+ ot = duint16(s, ot, uint16(mapbucket(t).Width))
+ ot = duint8(s, ot, uint8(isreflexive(t.Down)))
+
+ case TPTR32,
+ TPTR64:
+ if t.Type.Etype == TANY {
+ // ../../runtime/type.go:/UnsafePointerType
+ ot = dcommontype(s, ot, t)
+
+ break
+ }
+
+ // ../../runtime/type.go:/PtrType
+ s1 = dtypesym(t.Type)
+
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s1, 0)
+
+ // ../../runtime/type.go:/StructType
+ // for security, only the exported fields.
+ case TSTRUCT:
+ n = 0
+
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ dtypesym(t1.Type)
+ n++
+ }
+
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ // ../../runtime/type.go:/structField
+ if t1.Sym != nil && !(t1.Embedded != 0) {
+ ot = dgostringptr(s, ot, t1.Sym.Name)
+ if exportname(t1.Sym.Name) {
+ ot = dgostringptr(s, ot, "")
+ } else {
+ ot = dgopkgpath(s, ot, t1.Sym.Pkg)
+ }
+ } else {
+ ot = dgostringptr(s, ot, "")
+ if t1.Type.Sym != nil && t1.Type.Sym.Pkg == builtinpkg {
+ ot = dgopkgpath(s, ot, localpkg)
+ } else {
+ ot = dgostringptr(s, ot, "")
+ }
+ }
+
+ ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+ ot = dgostrlitptr(s, ot, t1.Note)
+ ot = duintptr(s, ot, uint64(t1.Width)) // field offset
+ }
+ }
+
+ ot = dextratype(s, ot, t, xt)
+ ggloblsym(s, int32(ot), int8(dupok|obj.RODATA))
+
+ // generate typelink.foo pointing at s = type.foo.
+ // The linker will leave a table of all the typelinks for
+ // types in the binary, so reflect can find them.
+ // We only need the link for unnamed composites that
+// we want to be able to find.
+ if t.Sym == nil {
+ switch t.Etype {
+ case TARRAY,
+ TCHAN,
+ TMAP:
+ slink = typelinksym(t)
+ dsymptr(slink, 0, s, 0)
+ ggloblsym(slink, int32(Widthptr), int8(dupok|obj.RODATA))
+ }
+ }
+
+ return s
+}
+
+func dumptypestructs() {
+ var i int
+ var l *NodeList
+ var n *Node
+ var t *Type
+ var p *Pkg
+
+ // copy types from externdcl list to signatlist
+ for l = externdcl; l != nil; l = l.Next {
+ n = l.N
+ if n.Op != OTYPE {
+ continue
+ }
+ signatlist = list(signatlist, n)
+ }
+
+ // process signatlist
+ for l = signatlist; l != nil; l = l.Next {
+ n = l.N
+ if n.Op != OTYPE {
+ continue
+ }
+ t = n.Type
+ dtypesym(t)
+ if t.Sym != nil {
+ dtypesym(Ptrto(t))
+ }
+ }
+
+ // generate import strings for imported packages
+ for i = 0; i < len(phash); i++ {
+ for p = phash[i]; p != nil; p = p.Link {
+ if p.Direct != 0 {
+ dimportpath(p)
+ }
+ }
+ }
+
+ // do basic types if compiling package runtime.
+ // they have to be in at least one package,
+ // and runtime is always loaded implicitly,
+ // so this is as good as any.
+ // another possible choice would be package main,
+ // but using runtime means fewer copies in .6 files.
+ if compiling_runtime != 0 {
+ for i = 1; i <= TBOOL; i++ {
+ dtypesym(Ptrto(Types[i]))
+ }
+ dtypesym(Ptrto(Types[TSTRING]))
+ dtypesym(Ptrto(Types[TUNSAFEPTR]))
+
+ // emit type structs for error and func(error) string.
+ // The latter is the type of an auto-generated wrapper.
+ dtypesym(Ptrto(errortype))
+
+ dtypesym(functype(nil, list1(Nod(ODCLFIELD, nil, typenod(errortype))), list1(Nod(ODCLFIELD, nil, typenod(Types[TSTRING])))))
+
+ // add paths for runtime and main, which 6l imports implicitly.
+ dimportpath(Runtimepkg)
+
+ if flag_race != 0 {
+ dimportpath(racepkg)
+ }
+ dimportpath(mkpkg(newstrlit("main")))
+ }
+}
+
+func dalgsym(t *Type) *Sym {
+ var ot int
+ var s *Sym
+ var hash *Sym
+ var hashfunc *Sym
+ var eq *Sym
+ var eqfunc *Sym
+ var p string
+
+ // dalgsym is only called for a type that needs an algorithm table,
+ // which implies that the type is comparable (or else it would use ANOEQ).
+
+ if algtype(t) == AMEM {
+ // we use one algorithm table for all AMEM types of a given size
+ p = fmt.Sprintf(".alg%d", t.Width)
+
+ s = Pkglookup(p, typepkg)
+
+ if s.Flags&SymAlgGen != 0 {
+ return s
+ }
+ s.Flags |= SymAlgGen
+
+ // make hash closure
+ p = fmt.Sprintf(".hashfunc%d", t.Width)
+
+ hashfunc = Pkglookup(p, typepkg)
+
+ ot = 0
+ ot = dsymptr(hashfunc, ot, Pkglookup("memhash_varlen", Runtimepkg), 0)
+ ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure
+ ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)
+
+ // make equality closure
+ p = fmt.Sprintf(".eqfunc%d", t.Width)
+
+ eqfunc = Pkglookup(p, typepkg)
+
+ ot = 0
+ ot = dsymptr(eqfunc, ot, Pkglookup("memequal_varlen", Runtimepkg), 0)
+ ot = duintxx(eqfunc, ot, uint64(t.Width), Widthptr)
+ ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
+ } else {
+ // generate an alg table specific to this type
+ s = typesymprefix(".alg", t)
+
+ hash = typesymprefix(".hash", t)
+ eq = typesymprefix(".eq", t)
+ hashfunc = typesymprefix(".hashfunc", t)
+ eqfunc = typesymprefix(".eqfunc", t)
+
+ genhash(hash, t)
+ geneq(eq, t)
+
+ // make Go funcs (closures) for calling hash and equal from Go
+ dsymptr(hashfunc, 0, hash, 0)
+
+ ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
+ dsymptr(eqfunc, 0, eq, 0)
+ ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
+ }
+
+ // ../../runtime/alg.go:/typeAlg
+ ot = 0
+
+ ot = dsymptr(s, ot, hashfunc, 0)
+ ot = dsymptr(s, ot, eqfunc, 0)
+ ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
+ return s
+}
+
+func usegcprog(t *Type) int {
+ var size int64
+ var nptr int64
+
+ if !haspointers(t) {
+ return 0
+ }
+ if t.Width == BADWIDTH {
+ dowidth(t)
+ }
+
+ // Calculate size of the unrolled GC mask.
+ nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
+
+ size = nptr
+ if size%2 != 0 {
+ size *= 2 // repeated
+ }
+ size = size * obj.GcBits / 8 // 4 bits per word
+
+ // Decide whether to use unrolled GC mask or GC program.
+ // We could use a more elaborate condition, but this seems to work well in practice.
+ // For small objects the GC program can't give a significant reduction.
+ // Large objects usually contain arrays; and even when they don't,
+ // the program uses 2 bits per word while the mask uses 4 bits per word,
+ // so the program is still smaller.
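+ // For example, on amd64 (Widthptr = 8, GcBits = 4) an even word count
+ // needs nptr/2 mask bytes, so types wider than 32 words (256 bytes)
+ // exceed the 2*Widthptr threshold and get a GC program.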
+ return bool2int(size > int64(2*Widthptr))
+}
+
+// Generates sparse GC bitmask (4 bits per word).
+func gengcmask(t *Type, gcmask []byte) {
+ var vec *Bvec
+ var xoffset int64
+ var nptr int64
+ var i int64
+ var j int64
+ var half int
+ var bits uint8
+ var pos []byte
+
+ for i = 0; i < 16; i++ {
+ gcmask[i] = 0
+ }
+ if !haspointers(t) {
+ return
+ }
+
+ // Generate the compact mask, in the same format stacks use.
+ xoffset = 0
+
+ vec = bvalloc(2 * int32(Widthptr) * 8)
+ twobitwalktype1(t, &xoffset, vec)
+
+ // Unfold the mask for the GC bitmap format:
+ // 4 bits per word, 2 high bits encode pointer info.
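+ // Each word's two-bit code lands in bits 2-3 of its nibble; two words
+ // pack into one byte, low nibble first (tracked by half below).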
+ pos = gcmask
+
+ nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
+ half = 0
+
+ // If the number of words is odd, repeat the mask.
+ // This simplifies the handling of arrays in the runtime.
+ for j = 0; j <= (nptr % 2); j++ {
+ for i = 0; i < nptr; i++ {
+ bits = uint8(bvget(vec, int32(i*obj.BitsPerPointer)) | bvget(vec, int32(i*obj.BitsPerPointer+1))<<1)
+
+ // Some fake types (e.g. Hmap) have missing fields;
+ // twobitwalktype1 generates BitsDead for those holes.
+ // Replace BitsDead with BitsScalar.
+ if bits == obj.BitsDead {
+ bits = obj.BitsScalar
+ }
+ bits <<= 2
+ if half != 0 {
+ bits <<= 4
+ }
+ pos[0] |= byte(bits)
+ half = bool2int(!(half != 0))
+ if !(half != 0) {
+ pos = pos[1:]
+ }
+ }
+ }
+}
+
+// Helper object for generation of GC programs.
+type ProgGen struct {
+ s *Sym
+ datasize int32
+ data [256 / obj.PointersPerByte]uint8
+ ot int64
+}
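+
+// Typical use: proggeninit, then a sequence of proggendata / proggenskip /
+// proggenarray calls, then proggenfini, which flushes any pending data,
+// emits InsEnd, and returns the total number of bytes emitted.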
+
+func proggeninit(g *ProgGen, s *Sym) {
+ g.s = s
+ g.datasize = 0
+ g.ot = 0
+ g.data = [256 / obj.PointersPerByte]uint8{}
+}
+
+func proggenemit(g *ProgGen, v uint8) {
+ g.ot = int64(duint8(g.s, int(g.ot), v))
+}
+
+// Emits an InsData block from g.data.
+func proggendataflush(g *ProgGen) {
+ var i int32
+ var s int32
+
+ if g.datasize == 0 {
+ return
+ }
+ proggenemit(g, obj.InsData)
+ proggenemit(g, uint8(g.datasize))
+ s = (g.datasize + obj.PointersPerByte - 1) / obj.PointersPerByte
+ for i = 0; i < s; i++ {
+ proggenemit(g, g.data[i])
+ }
+ g.datasize = 0
+ g.data = [256 / obj.PointersPerByte]uint8{}
+}
+
+func proggendata(g *ProgGen, d uint8) {
+ g.data[g.datasize/obj.PointersPerByte] |= d << uint((g.datasize%obj.PointersPerByte)*obj.BitsPerPointer)
+ g.datasize++
+ if g.datasize == 255 {
+ proggendataflush(g)
+ }
+}
+
+// Skip v bytes due to alignment, etc.
+func proggenskip(g *ProgGen, off int64, v int64) {
+ var i int64
+
+ for i = off; i < off+v; i++ {
+ if (i % int64(Widthptr)) == 0 {
+ proggendata(g, obj.BitsScalar)
+ }
+ }
+}
+
+// Emit insArray instruction.
+func proggenarray(g *ProgGen, len int64) {
+ var i int32
+
+ proggendataflush(g)
+ proggenemit(g, obj.InsArray)
+ for i = 0; i < int32(Widthptr); (func() { i++; len >>= 8 })() {
+ proggenemit(g, uint8(len))
+ }
+}
+
+func proggenarrayend(g *ProgGen) {
+ proggendataflush(g)
+ proggenemit(g, obj.InsArrayEnd)
+}
+
+func proggenfini(g *ProgGen) int64 {
+ proggendataflush(g)
+ proggenemit(g, obj.InsEnd)
+ return g.ot
+}
+
+// Generates GC program for large types.
+func gengcprog(t *Type, pgc0 **Sym, pgc1 **Sym) {
+ var gc0 *Sym
+ var gc1 *Sym
+ var nptr int64
+ var size int64
+ var ot int64
+ var xoffset int64
+ var g ProgGen
+
+ nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
+ size = nptr
+ if size%2 != 0 {
+ size *= 2 // repeated twice
+ }
+ size = size * obj.PointersPerByte / 8 // 4 bits per word
+ size++ // unroll flag at the beginning, used by the runtime (see runtime.markallocated)
+
+ // Emit space in BSS for the unrolled program.
+ *pgc0 = nil
+
+ // Don't generate it if it's too large; the runtime will unroll directly into the GC bitmap.
+ if size <= obj.MaxGCMask {
+ gc0 = typesymprefix(".gc", t)
+ ggloblsym(gc0, int32(size), obj.DUPOK|obj.NOPTR)
+ *pgc0 = gc0
+ }
+
+ // program in RODATA
+ gc1 = typesymprefix(".gcprog", t)
+
+ proggeninit(&g, gc1)
+ xoffset = 0
+ gengcprog1(&g, t, &xoffset)
+ ot = proggenfini(&g)
+ ggloblsym(gc1, int32(ot), obj.DUPOK|obj.RODATA)
+ *pgc1 = gc1
+}
+
+// Recursively walks type t and writes GC program into g.
+func gengcprog1(g *ProgGen, t *Type, xoffset *int64) {
+ var fieldoffset int64
+ var i int64
+ var o int64
+ var n int64
+ var t1 *Type
+
+ switch t.Etype {
+ case TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TINT,
+ TUINT,
+ TUINTPTR,
+ TBOOL,
+ TFLOAT32,
+ TFLOAT64,
+ TCOMPLEX64,
+ TCOMPLEX128:
+ proggenskip(g, *xoffset, t.Width)
+ *xoffset += t.Width
+
+ case TPTR32,
+ TPTR64,
+ TUNSAFEPTR,
+ TFUNC,
+ TCHAN,
+ TMAP:
+ proggendata(g, obj.BitsPointer)
+ *xoffset += t.Width
+
+ case TSTRING:
+ proggendata(g, obj.BitsPointer)
+ proggendata(g, obj.BitsScalar)
+ *xoffset += t.Width
+
+ // Assuming IfacePointerOnly=1.
+ case TINTER:
+ proggendata(g, obj.BitsPointer)
+
+ proggendata(g, obj.BitsPointer)
+ *xoffset += t.Width
+
+ case TARRAY:
+ if Isslice(t) != 0 {
+ proggendata(g, obj.BitsPointer)
+ proggendata(g, obj.BitsScalar)
+ proggendata(g, obj.BitsScalar)
+ } else {
+ t1 = t.Type
+ if t1.Width == 0 {
+ // ignore
+ }
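+ // Three strategies below: small arrays are unrolled element by
+ // element; pointer-free arrays emit a single InsArray of scalars;
+ // otherwise emit InsArray around the element type's own program.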
+ if t.Bound <= 1 || t.Bound*t1.Width < int64(32*Widthptr) {
+ for i = 0; i < t.Bound; i++ {
+ gengcprog1(g, t1, xoffset)
+ }
+ } else if !haspointers(t1) {
+ n = t.Width
+ n -= -*xoffset & (int64(Widthptr) - 1) // skip to next ptr boundary
+ proggenarray(g, (n+int64(Widthptr)-1)/int64(Widthptr))
+ proggendata(g, obj.BitsScalar)
+ proggenarrayend(g)
+ *xoffset -= (n+int64(Widthptr)-1)/int64(Widthptr)*int64(Widthptr) - t.Width
+ } else {
+ proggenarray(g, t.Bound)
+ gengcprog1(g, t1, xoffset)
+ *xoffset += (t.Bound - 1) * t1.Width
+ proggenarrayend(g)
+ }
+ }
+
+ case TSTRUCT:
+ o = 0
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ fieldoffset = t1.Width
+ proggenskip(g, *xoffset, fieldoffset-o)
+ *xoffset += fieldoffset - o
+ gengcprog1(g, t1.Type, xoffset)
+ o = fieldoffset + t1.Type.Width
+ }
+
+ proggenskip(g, *xoffset, t.Width-o)
+ *xoffset += t.Width - o
+
+ default:
+ Fatal("gengcprog1: unexpected type, %v", Tconv(t, 0))
+ }
+}
diff --git a/src/cmd/internal/gc/reg.go b/src/cmd/internal/gc/reg.go
new file mode 100644
index 0000000000..4cc9286d70
--- /dev/null
+++ b/src/cmd/internal/gc/reg.go
@@ -0,0 +1,1401 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "sort"
+)
+
+var firstf *Flow
+
+var first int = 1
+
+type rcmp []Rgn
+
+func (x rcmp) Len() int {
+ return len(x)
+}
+
+func (x rcmp) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+func (x rcmp) Less(i, j int) bool {
+ var p1 *Rgn
+ var p2 *Rgn
+
+ p1 = &x[i]
+ p2 = &x[j]
+ if p1.cost != p2.cost {
+ return p2.cost < p1.cost
+ }
+ if p1.varno != p2.varno {
+ return p2.varno < p1.varno
+ }
+ if p1.enter != p2.enter {
+ return p2.enter.Id < p1.enter.Id
+ }
+ return false
+}
+
+func setaddrs(bit Bits) {
+ var i int
+ var n int
+ var v *Var
+ var node *Node
+
+ for bany(&bit) != 0 {
+ // convert each bit to a variable
+ i = bnum(bit)
+
+ node = var_[i].node
+ n = int(var_[i].name)
+ biclr(&bit, uint(i))
+
+ // disable all pieces of that variable
+ for i = 0; i < nvar; i++ {
+ v = &var_[i:][0]
+ if v.node == node && int(v.name) == n {
+ v.addr = 2
+ }
+ }
+ }
+}
+
+var regnodes [64]*Node
+
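+// walkvardef walks forward from f, marking every tracked word of n as
+// recently defined (in the per-flow act bitmap) until the next VARKILL
+// of n or a CALL instruction ends the region.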
+func walkvardef(n *Node, f *Flow, active int) {
+ var f1 *Flow
+ var f2 *Flow
+ var bn int
+ var v *Var
+
+ for f1 = f; f1 != nil; f1 = f1.S1 {
+ if f1.Active == int32(active) {
+ break
+ }
+ f1.Active = int32(active)
+ if f1.Prog.As == obj.AVARKILL && f1.Prog.To.Node == n {
+ break
+ }
+ for v, _ = n.Opt.(*Var); v != nil; v = v.nextinnode {
+ bn = v.id
+ biset(&(f1.Data.(*Reg)).act, uint(bn))
+ }
+
+ if f1.Prog.As == obj.ACALL {
+ break
+ }
+ }
+
+ for f2 = f; f2 != f1; f2 = f2.S1 {
+ if f2.S2 != nil {
+ walkvardef(n, f2.S2, active)
+ }
+ }
+}
+
+/*
+ * add mov b,rn
+ * just after r
+ */
+func addmove(r *Flow, bn int, rn int, f int) {
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var a *obj.Addr
+ var v *Var
+
+ p1 = Ctxt.NewProg()
+ Clearp(p1)
+ p1.Pc = 9999
+
+ p = r.Prog
+ p1.Link = p.Link
+ p.Link = p1
+ p1.Lineno = p.Lineno
+
+ v = &var_[bn:][0]
+
+ a = &p1.To
+ a.Offset = v.offset
+ a.Etype = uint8(v.etype)
+ a.Type = obj.TYPE_MEM
+ a.Name = v.name
+ a.Node = v.node
+ a.Sym = Linksym(v.node.Sym)
+
+ /* NOTE(rsc): 9g did
+ if(a->etype == TARRAY)
+ a->type = TYPE_ADDR;
+ else if(a->sym == nil)
+ a->type = TYPE_CONST;
+ */
+ p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
+
+ // TODO(rsc): Remove special case here.
+ if (Thearch.Thechar == '9' || Thearch.Thechar == '5') && v.etype == TBOOL {
+ p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
+ }
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = int16(rn)
+ p1.From.Name = obj.NAME_NONE
+ if !(f != 0) {
+ p1.From = *a
+ *a = obj.Zprog.From
+ a.Type = obj.TYPE_REG
+ a.Reg = int16(rn)
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("%v ===add=== %v\n", p, p1)
+ }
+ Ostats.Nspill++
+}
+
+func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) int {
+ var t1 int64
+ var t2 int64
+
+ t1 = o1 + int64(w1)
+ t2 = o2 + int64(w2)
+
+ if !(t1 > o2 && t2 > o1) {
+ return 0
+ }
+
+ return 1
+}
+
+func mkvar(f *Flow, a *obj.Addr) Bits {
+ var v *Var
+ var i int
+ var n int
+ var et int
+ var z int
+ var flag int
+ var w int64
+ var regu uint64
+ var o int64
+ var bit Bits
+ var node *Node
+ var r *Reg
+
+ /*
+ * mark registers used
+ */
+ if a.Type == obj.TYPE_NONE {
+ goto none
+ }
+
+ r = f.Data.(*Reg)
+ r.use1.b[0] |= Thearch.Doregbits(int(a.Index)) // TODO: Use RtoB
+
+ switch a.Type {
+ default:
+ regu = Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
+ if regu == 0 {
+ goto none
+ }
+ bit = zbits
+ bit.b[0] = regu
+ return bit
+
+ // TODO(rsc): Remove special case here.
+ case obj.TYPE_ADDR:
+ if Thearch.Thechar == '9' || Thearch.Thechar == '5' {
+ goto memcase
+ }
+ a.Type = obj.TYPE_MEM
+ bit = mkvar(f, a)
+ setaddrs(bit)
+ a.Type = obj.TYPE_ADDR
+ Ostats.Naddr++
+ goto none
+
+ memcase:
+ fallthrough
+
+ case obj.TYPE_MEM:
+ if r != R {
+ r.use1.b[0] |= Thearch.RtoB(int(a.Reg))
+ }
+
+ /* NOTE: 5g did
+ if(r->f.prog->scond & (C_PBIT|C_WBIT))
+ r->set.b[0] |= RtoB(a->reg);
+ */
+ switch a.Name {
+ default:
+ goto none
+
+ case obj.NAME_EXTERN,
+ obj.NAME_STATIC,
+ obj.NAME_PARAM,
+ obj.NAME_AUTO:
+ n = int(a.Name)
+ }
+ }
+
+ node, _ = a.Node.(*Node)
+ if node == nil || node.Op != ONAME || node.Orig == nil {
+ goto none
+ }
+ node = node.Orig
+ if node.Orig != node {
+ Fatal("%v: bad node", Ctxt.Dconv(a))
+ }
+ if node.Sym == nil || node.Sym.Name[0] == '.' {
+ goto none
+ }
+ et = int(a.Etype)
+ o = a.Offset
+ w = a.Width
+ if w < 0 {
+ Fatal("bad width %d for %v", w, Ctxt.Dconv(a))
+ }
+
+ flag = 0
+ for i = 0; i < nvar; i++ {
+ v = &var_[i:][0]
+ if v.node == node && int(v.name) == n {
+ if v.offset == o {
+ if int(v.etype) == et {
+ if int64(v.width) == w {
+ // TODO(rsc): Remove special case for arm here.
+ if !(flag != 0) || Thearch.Thechar != '5' {
+ return blsh(uint(i))
+ }
+ }
+ }
+ }
+
+ // if they overlap, disable both
+ if overlap_reg(v.offset, v.width, o, int(w)) != 0 {
+ // print("disable overlap %s %d %d %d %d, %E != %E\n", s->name, v->offset, v->width, o, w, v->etype, et);
+ v.addr = 1
+
+ flag = 1
+ }
+ }
+ }
+
+ switch et {
+ case 0,
+ TFUNC:
+ goto none
+ }
+
+ if nvar >= NVAR {
+ if Debug['w'] > 1 && node != nil {
+ Fatal("variable not optimized: %v", Nconv(node, obj.FmtSharp))
+ }
+
+ // If we're not tracking a word in a variable, mark the rest as
+ // having its address taken, so that we keep the whole thing
+ // live at all calls. Otherwise we might optimize away part of
+ // a variable but not all of it.
+ for i = 0; i < nvar; i++ {
+ v = &var_[i:][0]
+ if v.node == node {
+ v.addr = 1
+ }
+ }
+
+ goto none
+ }
+
+ i = nvar
+ nvar++
+ v = &var_[i:][0]
+ v.id = i
+ v.offset = o
+ v.name = int8(n)
+ v.etype = int8(et)
+ v.width = int(w)
+ v.addr = int8(flag) // funny punning
+ v.node = node
+
+ // node.Opt is the head of a linked list
+ // of Vars within the given Node, so that
+ // we can start at a Var and find all the other
+ // Vars in the same Go variable.
+ v.nextinnode, _ = node.Opt.(*Var)
+
+ node.Opt = v
+
+ bit = blsh(uint(i))
+ if n == obj.NAME_EXTERN || n == obj.NAME_STATIC {
+ for z = 0; z < BITS; z++ {
+ externs.b[z] |= bit.b[z]
+ }
+ }
+ if n == obj.NAME_PARAM {
+ for z = 0; z < BITS; z++ {
+ params.b[z] |= bit.b[z]
+ }
+ }
+
+ if node.Class == PPARAM {
+ for z = 0; z < BITS; z++ {
+ ivar.b[z] |= bit.b[z]
+ }
+ }
+ if node.Class == PPARAMOUT {
+ for z = 0; z < BITS; z++ {
+ ovar.b[z] |= bit.b[z]
+ }
+ }
+
+ // Treat values with their address taken as live at calls,
+ // because the garbage collector's liveness analysis in ../gc/plive.c does.
+ // These must be consistent or else we will elide stores and the garbage
+ // collector will see uninitialized data.
+ // The typical case where our own analysis is out of sync is when the
+ // node appears to have its address taken but that code doesn't actually
+ // get generated and therefore doesn't show up as an address being
+ // taken when we analyze the instruction stream.
+ // One instance of this case is when a closure uses the same name as
+ // an outer variable for one of its own variables declared with :=.
+ // The parser flags the outer variable as possibly shared, and therefore
+ // sets addrtaken, even though it ends up not being actually shared.
+ // If we were better about _ elision, _ = &x would suffice too.
+ // The broader := in a closure problem is mentioned in a comment in
+ // closure.c:/^typecheckclosure and dcl.c:/^oldname.
+ if node.Addrtaken != 0 {
+ v.addr = 1
+ }
+
+ // Disable registerization for globals, because:
+ // (1) we might panic at any time and we want the recovery code
+ // to see the latest values (issue 1304).
+ // (2) we don't know what pointers might point at them and we want
+ // loads via those pointers to see updated values and vice versa (issue 7995).
+ //
+ // Disable registerization for results if using defer, because the deferred func
+ // might recover and return, causing the current values to be used.
+ if node.Class == PEXTERN || (Hasdefer != 0 && node.Class == PPARAMOUT) {
+ v.addr = 1
+ }
+
+ if Debug['R'] != 0 {
+ fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(int(et), 0), o, w, Nconv(node, obj.FmtSharp), Ctxt.Dconv(a), v.addr)
+ }
+ Ostats.Nvar++
+
+ return bit
+
+none:
+ return zbits
+}
+
+func prop(f *Flow, ref Bits, cal Bits) {
+ var f1 *Flow
+ var f2 *Flow
+ var r *Reg
+ var r1 *Reg
+ var z int
+ var i int
+ var v *Var
+ var v1 *Var
+
+ for f1 = f; f1 != nil; f1 = f1.P1 {
+ r1 = f1.Data.(*Reg)
+ for z = 0; z < BITS; z++ {
+ ref.b[z] |= r1.refahead.b[z]
+ if ref.b[z] != r1.refahead.b[z] {
+ r1.refahead.b[z] = ref.b[z]
+ change++
+ }
+
+ cal.b[z] |= r1.calahead.b[z]
+ if cal.b[z] != r1.calahead.b[z] {
+ r1.calahead.b[z] = cal.b[z]
+ change++
+ }
+ }
+
+ switch f1.Prog.As {
+ case obj.ACALL:
+ if Noreturn(f1.Prog) != 0 {
+ break
+ }
+
+ // Mark all input variables (ivar) as used, because that's what the
+ // liveness bitmaps say. The liveness bitmaps say that so that a
+ // panic will not show stale values in the parameter dump.
+ // Mark variables with a recent VARDEF (r1->act) as used,
+ // so that the optimizer flushes initializations to memory,
+ // so that if a garbage collection happens during this CALL,
+ // the collector will see initialized memory. Again this is to
+ // match what the liveness bitmaps say.
+ for z = 0; z < BITS; z++ {
+ cal.b[z] |= ref.b[z] | externs.b[z] | ivar.b[z] | r1.act.b[z]
+ ref.b[z] = 0
+ }
+
+ // cal.b is the current approximation of what's live across the call.
+ // Every bit in cal.b is a single stack word. For each such word,
+ // find all the other tracked stack words in the same Go variable
+ // (struct/slice/string/interface) and mark them live too.
+ // This is necessary because the liveness analysis for the garbage
+ // collector works at variable granularity, not at word granularity.
+ // It is fundamental for slice/string/interface: the garbage collector
+ // needs the whole value, not just some of the words, in order to
+ // interpret the other bits correctly. Specifically, slice needs a consistent
+ // ptr and cap, string needs a consistent ptr and len, and interface
+ // needs a consistent type word and data word.
+ for z = 0; z < BITS; z++ {
+ if cal.b[z] == 0 {
+ continue
+ }
+ for i = 0; i < 64; i++ {
+ if z*64+i >= nvar || (cal.b[z]>>uint(i))&1 == 0 {
+ continue
+ }
+ v = &var_[z*64+i:][0]
+ if v.node.Opt == nil { // v represents fixed register, not Go variable
+ continue
+ }
+
+ // v.node.Opt is the head of a linked list of Vars
+ // corresponding to tracked words from the Go variable v.node.
+ // Walk the list and set all the bits.
+ // For a large struct this could end up being quadratic:
+ // after the first setting, the outer loop (for z, i) would see a 1 bit
+ // for all of the remaining words in the struct, and for each such
+ // word would go through and turn on all the bits again.
+ // To avoid the quadratic behavior, we only turn on the bits if
+ // v is the head of the list or if the head's bit is not yet turned on.
+ // This will set the bits at most twice, keeping the overall loop linear.
+ v1, _ = v.node.Opt.(*Var)
+
+ if v == v1 || !(btest(&cal, uint(v1.id)) != 0) {
+ for ; v1 != nil; v1 = v1.nextinnode {
+ biset(&cal, uint(v1.id))
+ }
+ }
+ }
+ }
+
+ case obj.ATEXT:
+ for z = 0; z < BITS; z++ {
+ cal.b[z] = 0
+ ref.b[z] = 0
+ }
+
+ case obj.ARET:
+ for z = 0; z < BITS; z++ {
+ cal.b[z] = externs.b[z] | ovar.b[z]
+ ref.b[z] = 0
+ }
+ }
+
+ for z = 0; z < BITS; z++ {
+ ref.b[z] = ref.b[z]&^r1.set.b[z] | r1.use1.b[z] | r1.use2.b[z]
+ cal.b[z] &^= (r1.set.b[z] | r1.use1.b[z] | r1.use2.b[z])
+ r1.refbehind.b[z] = ref.b[z]
+ r1.calbehind.b[z] = cal.b[z]
+ }
+
+ if f1.Active != 0 {
+ break
+ }
+ f1.Active = 1
+ }
+
+ for ; f != f1; f = f.P1 {
+ r = f.Data.(*Reg)
+ for f2 = f.P2; f2 != nil; f2 = f2.P2link {
+ prop(f2, r.refbehind, r.calbehind)
+ }
+ }
+}
+
+func synch(f *Flow, dif Bits) {
+ var f1 *Flow
+ var r1 *Reg
+ var z int
+
+ for f1 = f; f1 != nil; f1 = f1.S1 {
+ r1 = f1.Data.(*Reg)
+ for z = 0; z < BITS; z++ {
+ dif.b[z] = dif.b[z]&^(^r1.refbehind.b[z]&r1.refahead.b[z]) | r1.set.b[z] | r1.regdiff.b[z]
+ if dif.b[z] != r1.regdiff.b[z] {
+ r1.regdiff.b[z] = dif.b[z]
+ change++
+ }
+ }
+
+ if f1.Active != 0 {
+ break
+ }
+ f1.Active = 1
+ for z = 0; z < BITS; z++ {
+ dif.b[z] &^= (^r1.calbehind.b[z] & r1.calahead.b[z])
+ }
+ if f1.S2 != nil {
+ synch(f1.S2, dif)
+ }
+ }
+}
+
+func allreg(b uint64, r *Rgn) uint64 {
+ var v *Var
+ var i int
+
+ v = &var_[r.varno:][0]
+ r.regno = 0
+ switch v.etype {
+ default:
+ Fatal("unknown etype %d/%v", Bitno(b), Econv(int(v.etype), 0))
+
+ case TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TINT,
+ TUINT,
+ TUINTPTR,
+ TBOOL,
+ TPTR32,
+ TPTR64:
+ i = Thearch.BtoR(^b)
+ if i != 0 && r.cost > 0 {
+ r.regno = int16(i)
+ return Thearch.RtoB(i)
+ }
+
+ case TFLOAT32,
+ TFLOAT64:
+ i = Thearch.BtoF(^b)
+ if i != 0 && r.cost > 0 {
+ r.regno = int16(i)
+ return Thearch.FtoB(i)
+ }
+ }
+
+ return 0
+}
+
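+// LOAD selects bits referenced ahead of r but not behind it (values
+// that would need a load); STORE selects bits live across a call ahead
+// of r but not behind it (values that would need a store).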
+func LOAD(r *Reg, z int) uint64 {
+ return ^r.refbehind.b[z] & r.refahead.b[z]
+}
+
+func STORE(r *Reg, z int) uint64 {
+ return ^r.calbehind.b[z] & r.calahead.b[z]
+}
+
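+// paint1, paint2, and paint3 walk the same region of the flow graph for
+// one variable bit: paint1 accumulates the cost/benefit of registerizing
+// it (in change), paint2 gathers the registers already used in the
+// region, and paint3 rewrites the instructions to use the chosen
+// register (see regopt passes 5 and 6).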
+func paint1(f *Flow, bn int) {
+ var f1 *Flow
+ var r *Reg
+ var r1 *Reg
+ var z int
+ var bb uint64
+
+ z = bn / 64
+ bb = 1 << uint(bn%64)
+ r = f.Data.(*Reg)
+ if r.act.b[z]&bb != 0 {
+ return
+ }
+ for {
+ if !(r.refbehind.b[z]&bb != 0) {
+ break
+ }
+ f1 = f.P1
+ if f1 == nil {
+ break
+ }
+ r1 = f1.Data.(*Reg)
+ if !(r1.refahead.b[z]&bb != 0) {
+ break
+ }
+ if r1.act.b[z]&bb != 0 {
+ break
+ }
+ f = f1
+ r = r1
+ }
+
+ if LOAD(r, z)&^(r.set.b[z]&^(r.use1.b[z]|r.use2.b[z]))&bb != 0 {
+ change -= CLOAD * int(f.Loop)
+ }
+
+ for {
+ r.act.b[z] |= bb
+
+ if f.Prog.As != obj.ANOP { // don't give credit for NOPs
+ if r.use1.b[z]&bb != 0 {
+ change += CREF * int(f.Loop)
+ }
+ if (r.use2.b[z]|r.set.b[z])&bb != 0 {
+ change += CREF * int(f.Loop)
+ }
+ }
+
+ if STORE(r, z)&r.regdiff.b[z]&bb != 0 {
+ change -= CLOAD * int(f.Loop)
+ }
+
+ if r.refbehind.b[z]&bb != 0 {
+ for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+ if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+ paint1(f1, bn)
+ }
+ }
+ }
+
+ if !(r.refahead.b[z]&bb != 0) {
+ break
+ }
+ f1 = f.S2
+ if f1 != nil {
+ if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+ paint1(f1, bn)
+ }
+ }
+ f = f.S1
+ if f == nil {
+ break
+ }
+ r = f.Data.(*Reg)
+ if r.act.b[z]&bb != 0 {
+ break
+ }
+ if !(r.refbehind.b[z]&bb != 0) {
+ break
+ }
+ }
+}
+
+func paint2(f *Flow, bn int, depth int) uint64 {
+ var f1 *Flow
+ var r *Reg
+ var r1 *Reg
+ var z int
+ var bb uint64
+ var vreg uint64
+
+ z = bn / 64
+ bb = 1 << uint(bn%64)
+ vreg = regbits
+ r = f.Data.(*Reg)
+ if !(r.act.b[z]&bb != 0) {
+ return vreg
+ }
+ for {
+ if !(r.refbehind.b[z]&bb != 0) {
+ break
+ }
+ f1 = f.P1
+ if f1 == nil {
+ break
+ }
+ r1 = f1.Data.(*Reg)
+ if !(r1.refahead.b[z]&bb != 0) {
+ break
+ }
+ if !(r1.act.b[z]&bb != 0) {
+ break
+ }
+ f = f1
+ r = r1
+ }
+
+ for {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf(" paint2 %d %v\n", depth, f.Prog)
+ }
+
+ r.act.b[z] &^= bb
+
+ vreg |= r.regu
+
+ if r.refbehind.b[z]&bb != 0 {
+ for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+ if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+ vreg |= paint2(f1, bn, depth+1)
+ }
+ }
+ }
+
+ if !(r.refahead.b[z]&bb != 0) {
+ break
+ }
+ f1 = f.S2
+ if f1 != nil {
+ if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+ vreg |= paint2(f1, bn, depth+1)
+ }
+ }
+ f = f.S1
+ if f == nil {
+ break
+ }
+ r = f.Data.(*Reg)
+ if !(r.act.b[z]&bb != 0) {
+ break
+ }
+ if !(r.refbehind.b[z]&bb != 0) {
+ break
+ }
+ }
+
+ return vreg
+}
+
+func paint3(f *Flow, bn int, rb uint64, rn int) {
+ var f1 *Flow
+ var r *Reg
+ var r1 *Reg
+ var p *obj.Prog
+ var z int
+ var bb uint64
+
+ z = bn / 64
+ bb = 1 << uint(bn%64)
+ r = f.Data.(*Reg)
+ if r.act.b[z]&bb != 0 {
+ return
+ }
+ for {
+ if !(r.refbehind.b[z]&bb != 0) {
+ break
+ }
+ f1 = f.P1
+ if f1 == nil {
+ break
+ }
+ r1 = f1.Data.(*Reg)
+ if !(r1.refahead.b[z]&bb != 0) {
+ break
+ }
+ if r1.act.b[z]&bb != 0 {
+ break
+ }
+ f = f1
+ r = r1
+ }
+
+ if LOAD(r, z)&^(r.set.b[z]&^(r.use1.b[z]|r.use2.b[z]))&bb != 0 {
+ addmove(f, bn, rn, 0)
+ }
+ for {
+ r.act.b[z] |= bb
+ p = f.Prog
+
+ if r.use1.b[z]&bb != 0 {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ addreg(&p.From, rn)
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf(" ===change== %v\n", p)
+ }
+ }
+
+ if (r.use2.b[z]|r.set.b[z])&bb != 0 {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ addreg(&p.To, rn)
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf(" ===change== %v\n", p)
+ }
+ }
+
+ if STORE(r, z)&r.regdiff.b[z]&bb != 0 {
+ addmove(f, bn, rn, 1)
+ }
+ r.regu |= rb
+
+ if r.refbehind.b[z]&bb != 0 {
+ for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+ if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+ paint3(f1, bn, rb, rn)
+ }
+ }
+ }
+
+ if !(r.refahead.b[z]&bb != 0) {
+ break
+ }
+ f1 = f.S2
+ if f1 != nil {
+ if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+ paint3(f1, bn, rb, rn)
+ }
+ }
+ f = f.S1
+ if f == nil {
+ break
+ }
+ r = f.Data.(*Reg)
+ if r.act.b[z]&bb != 0 {
+ break
+ }
+ if !(r.refbehind.b[z]&bb != 0) {
+ break
+ }
+ }
+}
+
+func addreg(a *obj.Addr, rn int) {
+ a.Sym = nil
+ a.Node = nil
+ a.Offset = 0
+ a.Type = obj.TYPE_REG
+ a.Reg = int16(rn)
+ a.Name = 0
+
+ Ostats.Ncvtreg++
+}
+
+func dumpone(f *Flow, isreg int) {
+ var z int
+ var bit Bits
+ var r *Reg
+
+ fmt.Printf("%d:%v", f.Loop, f.Prog)
+ if isreg != 0 {
+ r = f.Data.(*Reg)
+ for z = 0; z < BITS; z++ {
+ bit.b[z] = r.set.b[z] | r.use1.b[z] | r.use2.b[z] | r.refbehind.b[z] | r.refahead.b[z] | r.calbehind.b[z] | r.calahead.b[z] | r.regdiff.b[z] | r.act.b[z] | 0
+ }
+ if bany(&bit) != 0 {
+ fmt.Printf("\t")
+ if bany(&r.set) != 0 {
+ fmt.Printf(" s:%v", Qconv(r.set, 0))
+ }
+ if bany(&r.use1) != 0 {
+ fmt.Printf(" u1:%v", Qconv(r.use1, 0))
+ }
+ if bany(&r.use2) != 0 {
+ fmt.Printf(" u2:%v", Qconv(r.use2, 0))
+ }
+ if bany(&r.refbehind) != 0 {
+ fmt.Printf(" rb:%v ", Qconv(r.refbehind, 0))
+ }
+ if bany(&r.refahead) != 0 {
+ fmt.Printf(" ra:%v ", Qconv(r.refahead, 0))
+ }
+ if bany(&r.calbehind) != 0 {
+ fmt.Printf(" cb:%v ", Qconv(r.calbehind, 0))
+ }
+ if bany(&r.calahead) != 0 {
+ fmt.Printf(" ca:%v ", Qconv(r.calahead, 0))
+ }
+ if bany(&r.regdiff) != 0 {
+ fmt.Printf(" d:%v ", Qconv(r.regdiff, 0))
+ }
+ if bany(&r.act) != 0 {
+ fmt.Printf(" a:%v ", Qconv(r.act, 0))
+ }
+ }
+ }
+
+ fmt.Printf("\n")
+}
+
+func Dumpit(str string, r0 *Flow, isreg int) {
+ var r *Flow
+ var r1 *Flow
+
+ fmt.Printf("\n%s\n", str)
+ for r = r0; r != nil; r = r.Link {
+ dumpone(r, isreg)
+ r1 = r.P2
+ if r1 != nil {
+ fmt.Printf("\tpred:")
+ for ; r1 != nil; r1 = r1.P2link {
+ fmt.Printf(" %.4d", uint(int(r1.Prog.Pc)))
+ }
+ if r.P1 != nil {
+ fmt.Printf(" (and %.4d)", uint(int(r.P1.Prog.Pc)))
+ } else {
+ fmt.Printf(" (only)")
+ }
+ fmt.Printf("\n")
+ }
+
+ // Print successors if it's not just the next one
+ if r.S1 != r.Link || r.S2 != nil {
+ fmt.Printf("\tsucc:")
+ if r.S1 != nil {
+ fmt.Printf(" %.4d", uint(int(r.S1.Prog.Pc)))
+ }
+ if r.S2 != nil {
+ fmt.Printf(" %.4d", uint(int(r.S2.Prog.Pc)))
+ }
+ fmt.Printf("\n")
+ }
+ }
+}
+
+func regopt(firstp *obj.Prog) {
+ var f *Flow
+ var f1 *Flow
+ var r *Reg
+ var p *obj.Prog
+ var g *Graph
+ var info ProgInfo
+ var i int
+ var z int
+ var active int
+ var vreg uint64
+ var usedreg uint64
+ var mask uint64
+ var nreg int
+ var regnames []string
+ var bit Bits
+ var rgp *Rgn
+
+ if first != 0 {
+ first = 0
+ }
+
+ mergetemp(firstp)
+
+ /*
+ * control flow is more complicated in generated go code
+ * than in generated c code. define pseudo-variables for
+ * registers, so we have complete register usage information.
+ */
+ regnames = Thearch.Regnames(&nreg)
+
+ nvar = nreg
+ for i = 0; i < nreg; i++ {
+ var_[i] = Var{}
+ }
+ for i = 0; i < nreg; i++ {
+ if regnodes[i] == nil {
+ regnodes[i] = newname(Lookup(regnames[i]))
+ }
+ var_[i].node = regnodes[i]
+ }
+
+ regbits = Thearch.Excludedregs()
+ externs = zbits
+ params = zbits
+ consts = zbits
+ addrs = zbits
+ ivar = zbits
+ ovar = zbits
+
+ /*
+ * pass 1
+ * build aux data structure
+ * allocate pcs
+ * find use and set of variables
+ */
+ g = Flowstart(firstp, func() interface{} { return new(Reg) })
+
+ if g == nil {
+ for i = 0; i < nvar; i++ {
+ var_[i].node.Opt = nil
+ }
+ return
+ }
+
+ firstf = g.Start
+
+ for f = firstf; f != nil; f = f.Link {
+ p = f.Prog
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+ Thearch.Proginfo(&info, p)
+
+ // Avoid making variables for direct-called functions.
+ if p.As == obj.ACALL && p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_EXTERN {
+ continue
+ }
+
+ // from vs to doesn't matter for registers.
+ r = f.Data.(*Reg)
+
+ r.use1.b[0] |= info.Reguse | info.Regindex
+ r.set.b[0] |= info.Regset
+
+ bit = mkvar(f, &p.From)
+ if bany(&bit) != 0 {
+ if info.Flags&LeftAddr != 0 {
+ setaddrs(bit)
+ }
+ if info.Flags&LeftRead != 0 {
+ for z = 0; z < BITS; z++ {
+ r.use1.b[z] |= bit.b[z]
+ }
+ }
+ if info.Flags&LeftWrite != 0 {
+ for z = 0; z < BITS; z++ {
+ r.set.b[z] |= bit.b[z]
+ }
+ }
+ }
+
+ // Compute used register for reg
+ if info.Flags&RegRead != 0 {
+ r.use1.b[0] |= Thearch.RtoB(int(p.Reg))
+ }
+
+ // Currently we never generate three register forms.
+ // If we do, this will need to change.
+ if p.From3.Type != obj.TYPE_NONE {
+ Fatal("regopt not implemented for from3")
+ }
+
+ bit = mkvar(f, &p.To)
+ if bany(&bit) != 0 {
+ if info.Flags&RightAddr != 0 {
+ setaddrs(bit)
+ }
+ if info.Flags&RightRead != 0 {
+ for z = 0; z < BITS; z++ {
+ r.use2.b[z] |= bit.b[z]
+ }
+ }
+ if info.Flags&RightWrite != 0 {
+ for z = 0; z < BITS; z++ {
+ r.set.b[z] |= bit.b[z]
+ }
+ }
+ }
+ }
+
+ for i = 0; i < nvar; i++ {
+ var v *Var
+ v = &var_[i:][0]
+ if v.addr != 0 {
+ bit = blsh(uint(i))
+ for z = 0; z < BITS; z++ {
+ addrs.b[z] |= bit.b[z]
+ }
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(int(v.etype), 0), v.width, Nconv(v.node, 0), v.offset)
+ }
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass1", firstf, 1)
+ }
+
+ /*
+ * pass 2
+ * find looping structure
+ */
+ flowrpo(g)
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass2", firstf, 1)
+ }
+
+ /*
+ * pass 2.5
+ * iterate propagating fat vardef covering forward
+ * r->act records vars with a VARDEF since the last CALL.
+ * (r->act will be reused in pass 5 for something else,
+ * but we'll be done with it by then.)
+ */
+ active = 0
+
+ for f = firstf; f != nil; f = f.Link {
+ f.Active = 0
+ r = f.Data.(*Reg)
+ r.act = zbits
+ }
+
+ for f = firstf; f != nil; f = f.Link {
+ p = f.Prog
+ if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) != 0 && ((p.To.Node).(*Node)).Opt != nil {
+ active++
+ walkvardef(p.To.Node.(*Node), f, active)
+ }
+ }
+
+ /*
+ * pass 3
+ * iterate propagating usage
+ * back until flow graph is complete
+ */
+loop1:
+ change = 0
+
+ for f = firstf; f != nil; f = f.Link {
+ f.Active = 0
+ }
+ for f = firstf; f != nil; f = f.Link {
+ if f.Prog.As == obj.ARET {
+ prop(f, zbits, zbits)
+ }
+ }
+
+ /* pick up unreachable code */
+loop11:
+ i = 0
+
+ for f = firstf; f != nil; f = f1 {
+ f1 = f.Link
+ if f1 != nil && f1.Active != 0 && !(f.Active != 0) {
+ prop(f, zbits, zbits)
+ i = 1
+ }
+ }
+
+ if i != 0 {
+ goto loop11
+ }
+ if change != 0 {
+ goto loop1
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass3", firstf, 1)
+ }
+
+ /*
+ * pass 4
+ * iterate propagating register/variable synchrony
+ * forward until graph is complete
+ */
+loop2:
+ change = 0
+
+ for f = firstf; f != nil; f = f.Link {
+ f.Active = 0
+ }
+ synch(firstf, zbits)
+ if change != 0 {
+ goto loop2
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass4", firstf, 1)
+ }
+
+ /*
+ * pass 4.5
+ * move register pseudo-variables into regu.
+ */
+ mask = (1 << uint(nreg)) - 1
+ for f = firstf; f != nil; f = f.Link {
+ r = f.Data.(*Reg)
+ r.regu = (r.refbehind.b[0] | r.set.b[0]) & mask
+ r.set.b[0] &^= mask
+ r.use1.b[0] &^= mask
+ r.use2.b[0] &^= mask
+ r.refbehind.b[0] &^= mask
+ r.refahead.b[0] &^= mask
+ r.calbehind.b[0] &^= mask
+ r.calahead.b[0] &^= mask
+ r.regdiff.b[0] &^= mask
+ r.act.b[0] &^= mask
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass4.5", firstf, 1)
+ }
+
+ /*
+ * pass 5
+ * isolate regions
+ * calculate costs (paint1)
+ */
+ f = firstf
+
+ if f != nil {
+ r = f.Data.(*Reg)
+ for z = 0; z < BITS; z++ {
+ bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
+ }
+ if bany(&bit) != 0 && !(f.Refset != 0) {
+ // should never happen - all variables are preset
+ if Debug['w'] != 0 {
+ fmt.Printf("%v: used and not set: %v\n", f.Prog.Line(), Qconv(bit, 0))
+ }
+ f.Refset = 1
+ }
+ }
+
+ for f = firstf; f != nil; f = f.Link {
+ (f.Data.(*Reg)).act = zbits
+ }
+ nregion = 0
+ for f = firstf; f != nil; f = f.Link {
+ r = f.Data.(*Reg)
+ for z = 0; z < BITS; z++ {
+ bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
+ }
+ if bany(&bit) != 0 && !(f.Refset != 0) {
+ if Debug['w'] != 0 {
+ fmt.Printf("%v: set and not used: %v\n", f.Prog.Line(), Qconv(bit, 0))
+ }
+ f.Refset = 1
+ Thearch.Excise(f)
+ }
+
+ for z = 0; z < BITS; z++ {
+ bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
+ }
+ for bany(&bit) != 0 {
+ i = bnum(bit)
+ change = 0
+ paint1(f, i)
+ biclr(&bit, uint(i))
+ if change <= 0 {
+ continue
+ }
+ if nregion >= NRGN {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("too many regions\n")
+ }
+ goto brk
+ }
+
+ rgp = &region[nregion]
+ rgp.enter = f
+ rgp.varno = int16(i)
+ rgp.cost = int16(change)
+ nregion++
+ }
+ }
+
+brk:
+ sort.Sort(rcmp(region[:nregion]))
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass5", firstf, 1)
+ }
+
+ /*
+ * pass 6
+ * determine used registers (paint2)
+ * replace code (paint3)
+ */
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("\nregisterizing\n")
+ }
+ for i = 0; i < nregion; i++ {
+ rgp = &region[i]
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("region %d: cost %d varno %d enter %d\n", i, rgp.cost, rgp.varno, rgp.enter.Prog.Pc)
+ }
+ bit = blsh(uint(rgp.varno))
+ usedreg = paint2(rgp.enter, int(rgp.varno), 0)
+ vreg = allreg(usedreg, rgp)
+ if rgp.regno != 0 {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ var v *Var
+
+ v = &var_[rgp.varno:][0]
+ fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", Nconv(v.node, 0), v.offset, rgp.varno, Econv(int(v.etype), 0), Ctxt.Rconv(int(rgp.regno)), usedreg, vreg)
+ }
+
+ paint3(rgp.enter, int(rgp.varno), vreg, int(rgp.regno))
+ }
+ }
+
+ /*
+ * free aux structures. peep allocates new ones.
+ */
+ for i = 0; i < nvar; i++ {
+ var_[i].node.Opt = nil
+ }
+ Flowend(g)
+ firstf = nil
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ // Rebuild flow graph, since we inserted instructions
+ g = Flowstart(firstp, nil)
+
+ firstf = g.Start
+ Dumpit("pass6", firstf, 0)
+ Flowend(g)
+ firstf = nil
+ }
+
+ /*
+ * pass 7
+ * peep-hole on basic block
+ */
+ if !(Debug['R'] != 0) || Debug['P'] != 0 {
+ Thearch.Peep(firstp)
+ }
+
+ /*
+ * eliminate nops
+ */
+ for p = firstp; p != nil; p = p.Link {
+ for p.Link != nil && p.Link.As == obj.ANOP {
+ p.Link = p.Link.Link
+ }
+ if p.To.Type == obj.TYPE_BRANCH {
+ for p.To.U.Branch != nil && p.To.U.Branch.As == obj.ANOP {
+ p.To.U.Branch = p.To.U.Branch.Link
+ }
+ }
+ }
+
+ if Debug['R'] != 0 {
+ if Ostats.Ncvtreg != 0 || Ostats.Nspill != 0 || Ostats.Nreload != 0 || Ostats.Ndelmov != 0 || Ostats.Nvar != 0 || Ostats.Naddr != 0 || false {
+ fmt.Printf("\nstats\n")
+ }
+
+ if Ostats.Ncvtreg != 0 {
+ fmt.Printf("\t%4d cvtreg\n", Ostats.Ncvtreg)
+ }
+ if Ostats.Nspill != 0 {
+ fmt.Printf("\t%4d spill\n", Ostats.Nspill)
+ }
+ if Ostats.Nreload != 0 {
+ fmt.Printf("\t%4d reload\n", Ostats.Nreload)
+ }
+ if Ostats.Ndelmov != 0 {
+ fmt.Printf("\t%4d delmov\n", Ostats.Ndelmov)
+ }
+ if Ostats.Nvar != 0 {
+ fmt.Printf("\t%4d var\n", Ostats.Nvar)
+ }
+ if Ostats.Naddr != 0 {
+ fmt.Printf("\t%4d addr\n", Ostats.Naddr)
+ }
+
+ Ostats = OptStats{}
+ }
+}
diff --git a/src/cmd/internal/gc/select.go b/src/cmd/internal/gc/select.go
new file mode 100644
index 0000000000..9e659d19ea
--- /dev/null
+++ b/src/cmd/internal/gc/select.go
@@ -0,0 +1,389 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+/*
+ * select
+ */
+func typecheckselect(sel *Node) {
+ var ncase *Node
+ var n *Node
+ var def *Node
+ var l *NodeList
+ var lno int
+ var count int
+
+ def = nil
+ lno = int(setlineno(sel))
+ count = 0
+ typechecklist(sel.Ninit, Etop)
+ for l = sel.List; l != nil; l = l.Next {
+ count++
+ ncase = l.N
+ setlineno(ncase)
+ if ncase.Op != OXCASE {
+ Fatal("typecheckselect %v", Oconv(int(ncase.Op), 0))
+ }
+
+ if ncase.List == nil {
+ // default
+ if def != nil {
+ Yyerror("multiple defaults in select (first at %v)", def.Line())
+ } else {
+ def = ncase
+ }
+ } else if ncase.List.Next != nil {
+ Yyerror("select cases cannot be lists")
+ } else {
+ n = typecheck(&ncase.List.N, Etop)
+ ncase.Left = n
+ ncase.List = nil
+ setlineno(n)
+ switch n.Op {
+ default:
+ Yyerror("select case must be receive, send or assign recv")
+
+ // convert x = <-c into OSELRECV(x, <-c).
+ // remove implicit conversions; the eventual assignment
+ // will reintroduce them.
+ case OAS:
+ if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit != 0 {
+ n.Right = n.Right.Left
+ }
+
+ if n.Right.Op != ORECV {
+ Yyerror("select assignment must have receive on right hand side")
+ break
+ }
+
+ n.Op = OSELRECV
+
+ // convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
+ case OAS2RECV:
+ if n.Rlist.N.Op != ORECV {
+ Yyerror("select assignment must have receive on right hand side")
+ break
+ }
+
+ n.Op = OSELRECV2
+ n.Left = n.List.N
+ n.Ntest = n.List.Next.N
+ n.List = nil
+ n.Right = n.Rlist.N
+ n.Rlist = nil
+
+ // convert <-c into OSELRECV(N, <-c)
+ case ORECV:
+ n = Nod(OSELRECV, nil, n)
+
+ n.Typecheck = 1
+ ncase.Left = n
+
+ case OSEND:
+ break
+ }
+ }
+
+ typechecklist(ncase.Nbody, Etop)
+ }
+
+ sel.Xoffset = int64(count)
+ lineno = int32(lno)
+}
+
+func walkselect(sel *Node) {
+ var lno int
+ var i int
+ var n *Node
+ var r *Node
+ var a *Node
+ var var_ *Node
+ var selv *Node
+ var cas *Node
+ var dflt *Node
+ var ch *Node
+ var l *NodeList
+ var init *NodeList
+
+ if sel.List == nil && sel.Xoffset != 0 {
+ Fatal("double walkselect") // already rewrote
+ }
+
+ lno = int(setlineno(sel))
+ i = count(sel.List)
+
+ // optimization: zero-case select
+ if i == 0 {
+ sel.Nbody = list1(mkcall("block", nil, nil))
+ goto out
+ }
+
+ // optimization: one-case select: single op.
+ // TODO(rsc): Reenable optimization once order.c can handle it.
+ // golang.org/issue/7672.
+ if i == 1 {
+ cas = sel.List.N
+ setlineno(cas)
+ l = cas.Ninit
+ if cas.Left != nil { // not default:
+ n = cas.Left
+ l = concat(l, n.Ninit)
+ n.Ninit = nil
+ switch n.Op {
+ default:
+ Fatal("select %v", Oconv(int(n.Op), 0))
+ fallthrough
+
+ // ok already
+ case OSEND:
+ ch = n.Left
+
+ case OSELRECV,
+ OSELRECV2:
+ ch = n.Right.Left
+ if n.Op == OSELRECV || n.Ntest == nil {
+ if n.Left == nil {
+ n = n.Right
+ } else {
+ n.Op = OAS
+ }
+ break
+ }
+
+ if n.Left == nil {
+ typecheck(&nblank, Erv|Easgn)
+ n.Left = nblank
+ }
+
+ n.Op = OAS2
+ n.List = list(list1(n.Left), n.Ntest)
+ n.Rlist = list1(n.Right)
+ n.Right = nil
+ n.Left = nil
+ n.Ntest = nil
+ n.Typecheck = 0
+ typecheck(&n, Etop)
+ }
+
+ // if ch == nil { block() }; n;
+ a = Nod(OIF, nil, nil)
+
+ a.Ntest = Nod(OEQ, ch, nodnil())
+ a.Nbody = list1(mkcall("block", nil, &l))
+ typecheck(&a, Etop)
+ l = list(l, a)
+ l = list(l, n)
+ }
+
+ l = concat(l, cas.Nbody)
+ sel.Nbody = l
+ goto out
+ }
+
+ // convert case value arguments to addresses.
+ // this rewrite is used by both the general code and the next optimization.
+ for l = sel.List; l != nil; l = l.Next {
+ cas = l.N
+ setlineno(cas)
+ n = cas.Left
+ if n == nil {
+ continue
+ }
+ switch n.Op {
+ case OSEND:
+ n.Right = Nod(OADDR, n.Right, nil)
+ typecheck(&n.Right, Erv)
+
+ case OSELRECV,
+ OSELRECV2:
+ if n.Op == OSELRECV2 && n.Ntest == nil {
+ n.Op = OSELRECV
+ }
+ if n.Op == OSELRECV2 {
+ n.Ntest = Nod(OADDR, n.Ntest, nil)
+ typecheck(&n.Ntest, Erv)
+ }
+
+ if n.Left == nil {
+ n.Left = nodnil()
+ } else {
+ n.Left = Nod(OADDR, n.Left, nil)
+ typecheck(&n.Left, Erv)
+ }
+ }
+ }
+
+ // optimization: two-case select but one is default: single non-blocking op.
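+ // For example (a sketch):
+ //	select {
+ //	case c <- v:
+ //		body
+ //	default:
+ //		defaultbody
+ //	}
+ // compiles to
+ //	if selectnbsend(c, v) { body } else { defaultbody }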
+ if i == 2 && (sel.List.N.Left == nil || sel.List.Next.N.Left == nil) {
+ if sel.List.N.Left == nil {
+ cas = sel.List.Next.N
+ dflt = sel.List.N
+ } else {
+ dflt = sel.List.Next.N
+ cas = sel.List.N
+ }
+
+ n = cas.Left
+ setlineno(n)
+ r = Nod(OIF, nil, nil)
+ r.Ninit = cas.Ninit
+ switch n.Op {
+ default:
+ Fatal("select %v", Oconv(int(n.Op), 0))
+ fallthrough
+
+ // if selectnbsend(c, v) { body } else { default body }
+ case OSEND:
+ ch = n.Left
+
+ r.Ntest = mkcall1(chanfn("selectnbsend", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), ch, n.Right)
+
+ // if c != nil && selectnbrecv(&v, c) { body } else { default body }
+ case OSELRECV:
+ r = Nod(OIF, nil, nil)
+
+ r.Ninit = cas.Ninit
+ ch = n.Right.Left
+ r.Ntest = mkcall1(chanfn("selectnbrecv", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, ch)
+
+ // if c != nil && selectnbrecv2(&v, c) { body } else { default body }
+ case OSELRECV2:
+ r = Nod(OIF, nil, nil)
+
+ r.Ninit = cas.Ninit
+ ch = n.Right.Left
+ r.Ntest = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, n.Ntest, ch)
+ }
+
+ typecheck(&r.Ntest, Erv)
+ r.Nbody = cas.Nbody
+ r.Nelse = concat(dflt.Ninit, dflt.Nbody)
+ sel.Nbody = list1(r)
+ goto out
+ }
+
+ init = sel.Ninit
+ sel.Ninit = nil
+
+ // generate sel-struct
+ setlineno(sel)
+
+ selv = temp(selecttype(int32(sel.Xoffset)))
+ r = Nod(OAS, selv, nil)
+ typecheck(&r, Etop)
+ init = list(init, r)
+ var_ = conv(conv(Nod(OADDR, selv, nil), Types[TUNSAFEPTR]), Ptrto(Types[TUINT8]))
+ r = mkcall("newselect", nil, nil, var_, Nodintconst(selv.Type.Width), Nodintconst(sel.Xoffset))
+ typecheck(&r, Etop)
+ init = list(init, r)
+
+ // register cases
+ for l = sel.List; l != nil; l = l.Next {
+ cas = l.N
+ setlineno(cas)
+ n = cas.Left
+ r = Nod(OIF, nil, nil)
+ r.Ninit = cas.Ninit
+ cas.Ninit = nil
+ if n != nil {
+ r.Ninit = concat(r.Ninit, n.Ninit)
+ n.Ninit = nil
+ }
+
+ if n == nil {
+ // selectdefault(sel *byte);
+ r.Ntest = mkcall("selectdefault", Types[TBOOL], &r.Ninit, var_)
+ } else {
+ switch n.Op {
+ default:
+ Fatal("select %v", Oconv(int(n.Op), 0))
+ fallthrough
+
+ // selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
+ case OSEND:
+ r.Ntest = mkcall1(chanfn("selectsend", 2, n.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Left, n.Right)
+
+ // selectrecv(sel *byte, hchan *chan any, elem *any) (selected bool);
+ case OSELRECV:
+ r.Ntest = mkcall1(chanfn("selectrecv", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left)
+
+ // selectrecv2(sel *byte, hchan *chan any, elem *any, received *bool) (selected bool);
+ case OSELRECV2:
+ r.Ntest = mkcall1(chanfn("selectrecv2", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left, n.Ntest)
+ }
+ }
+
+ // selv is no longer alive after use.
+ r.Nbody = list(r.Nbody, Nod(OVARKILL, selv, nil))
+
+ r.Nbody = concat(r.Nbody, cas.Nbody)
+ r.Nbody = list(r.Nbody, Nod(OBREAK, nil, nil))
+ init = list(init, r)
+ }
+
+ // run the select
+ setlineno(sel)
+
+ init = list(init, mkcall("selectgo", nil, nil, var_))
+ sel.Nbody = init
+
+out:
+ sel.List = nil
+ walkstmtlist(sel.Nbody)
+ lineno = int32(lno)
+}
+
+// Keep in sync with src/runtime/chan.h.
+func selecttype(size int32) *Type {
+ var sel *Node
+ var sudog *Node
+ var scase *Node
+ var arr *Node
+
+ // TODO(dvyukov): it's possible to generate SudoG and Scase only once
+ // and then cache; and also cache Select per size.
+ sudog = Nod(OTSTRUCT, nil, nil)
+
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("g")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("selectdone")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("next")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("prev")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("elem")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("releasetime")), typenod(Types[TUINT64])))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("nrelease")), typenod(Types[TINT32])))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("waitlink")), typenod(Ptrto(Types[TUINT8]))))
+ typecheck(&sudog, Etype)
+ sudog.Type.Noalg = 1
+ sudog.Type.Local = 1
+
+ scase = Nod(OTSTRUCT, nil, nil)
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("elem")), typenod(Ptrto(Types[TUINT8]))))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("chan")), typenod(Ptrto(Types[TUINT8]))))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("pc")), typenod(Types[TUINTPTR])))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("kind")), typenod(Types[TUINT16])))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("so")), typenod(Types[TUINT16])))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("receivedp")), typenod(Ptrto(Types[TUINT8]))))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("releasetime")), typenod(Types[TUINT64])))
+ typecheck(&scase, Etype)
+ scase.Type.Noalg = 1
+ scase.Type.Local = 1
+
+ sel = Nod(OTSTRUCT, nil, nil)
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("tcase")), typenod(Types[TUINT16])))
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("ncase")), typenod(Types[TUINT16])))
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("pollorder")), typenod(Ptrto(Types[TUINT8]))))
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("lockorder")), typenod(Ptrto(Types[TUINT8]))))
+ arr = Nod(OTARRAY, Nodintconst(int64(size)), scase)
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("scase")), arr))
+ arr = Nod(OTARRAY, Nodintconst(int64(size)), typenod(Ptrto(Types[TUINT8])))
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("lockorderarr")), arr))
+ arr = Nod(OTARRAY, Nodintconst(int64(size)), typenod(Types[TUINT16]))
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("pollorderarr")), arr))
+ typecheck(&sel, Etype)
+ sel.Type.Noalg = 1
+ sel.Type.Local = 1
+
+ return sel.Type
+}
diff --git a/src/cmd/internal/gc/sinit.go b/src/cmd/internal/gc/sinit.go
new file mode 100644
index 0000000000..6d044f1285
--- /dev/null
+++ b/src/cmd/internal/gc/sinit.go
@@ -0,0 +1,1602 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+/*
+ * static initialization
+ */
+const (
+ InitNotStarted = 0
+ InitDone = 1
+ InitPending = 2
+)
+
+var initlist *NodeList
+
+// init1 walks the AST starting at n, and accumulates in out
+// the list of definitions needing init code in dependency order.
+func init1(n *Node, out **NodeList) {
+ var l *NodeList
+ var nv *Node
+
+ if n == nil {
+ return
+ }
+ init1(n.Left, out)
+ init1(n.Right, out)
+ for l = n.List; l != nil; l = l.Next {
+ init1(l.N, out)
+ }
+
+ if n.Left != nil && n.Type != nil && n.Left.Op == OTYPE && n.Class == PFUNC {
+ // Methods called as Type.Method(receiver, ...).
+ // Definitions for method expressions are stored in Type.Nname.
+ init1(n.Type.Nname, out)
+ }
+
+ if n.Op != ONAME {
+ return
+ }
+ switch n.Class {
+ case PEXTERN,
+ PFUNC:
+ break
+
+ default:
+ if isblank(n) && n.Curfn == nil && n.Defn != nil && n.Defn.Initorder == InitNotStarted {
+ // Initialization of blank names is part of init() but not
+ // when they are inside a function.
+ break
+ }
+
+ return
+ }
+
+ if n.Initorder == InitDone {
+ return
+ }
+ if n.Initorder == InitPending {
+ // Since mutually recursive sets of functions are allowed,
+ // we don't necessarily raise an error if n depends on a node
+ // which is already waiting for its dependencies to be visited.
+ //
+ // initlist contains a cycle of identifiers referring to each other.
+ // If this cycle contains a variable, then this variable refers to itself.
+ // Conversely, if there exists an initialization cycle involving
+ // a variable in the program, the tree walk will reach a cycle
+ // involving that variable.
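+ // For example (a sketch):
+ //	var a = b
+ //	var b = a
+ // walking a reaches b and then a again, which is already
+ // InitPending, so we land here.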
+ if n.Class != PFUNC {
+ nv = n
+ goto foundinitloop
+ }
+
+ for l = initlist; l.N != n; l = l.Next {
+ if l.N.Class != PFUNC {
+ nv = l.N
+ goto foundinitloop
+ }
+ }
+
+ // The loop involves only functions, ok.
+ return
+
+ // If there have already been errors printed,
+ // those errors probably confused us and
+ // there might not be a loop. Let the user
+ // fix those first.
+ foundinitloop:
+ Flusherrors()
+
+ if nerrors > 0 {
+ errorexit()
+ }
+
+ // There is a loop involving nv. We know about
+ // n and initlist = n1 <- ... <- nv <- ... <- n <- ...
+ fmt.Printf("%v: initialization loop:\n", nv.Line())
+
+ // Build back pointers in initlist.
+ for l = initlist; l != nil; l = l.Next {
+ if l.Next != nil {
+ l.Next.End = l
+ }
+ }
+
+ // Print nv -> ... -> n1 -> n.
+ for l = initlist; l.N != nv; l = l.Next {
+ }
+ for ; l != nil; l = l.End {
+ fmt.Printf("\t%v %v refers to\n", l.N.Line(), Sconv(l.N.Sym, 0))
+ }
+
+ // Print n -> ... -> nv.
+ for l = initlist; l.N != n; l = l.Next {
+ }
+ for ; l.N != nv; l = l.End {
+ fmt.Printf("\t%v %v refers to\n", l.N.Line(), Sconv(l.N.Sym, 0))
+ }
+ fmt.Printf("\t%v %v\n", nv.Line(), Sconv(nv.Sym, 0))
+ errorexit()
+ }
+
+ // reached a new unvisited node.
+ n.Initorder = InitPending
+
+ l = new(NodeList)
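+ // (In Go, new never returns nil; this check is vestigial from the C original.)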
+ if l == nil {
+ Flusherrors()
+ Yyerror("out of memory")
+ errorexit()
+ }
+
+ l.Next = initlist
+ l.N = n
+ l.End = nil
+ initlist = l
+
+ // make sure that everything n depends on is initialized.
+ // n.Defn is an assignment to n
+ if n.Defn != nil {
+ switch n.Defn.Op {
+ default:
+ goto bad
+
+ case ODCLFUNC:
+ init2list(n.Defn.Nbody, out)
+
+ case OAS:
+ if n.Defn.Left != n {
+ goto bad
+ }
+ if isblank(n.Defn.Left) && candiscard(n.Defn.Right) != 0 {
+ n.Defn.Op = OEMPTY
+ n.Defn.Left = nil
+ n.Defn.Right = nil
+ break
+ }
+
+ init2(n.Defn.Right, out)
+ if Debug['j'] != 0 {
+ fmt.Printf("%v\n", Sconv(n.Sym, 0))
+ }
+ if isblank(n) || !(staticinit(n, out) != 0) {
+ if Debug['%'] != 0 {
+ Dump("nonstatic", n.Defn)
+ }
+ *out = list(*out, n.Defn)
+ }
+
+ case OAS2FUNC,
+ OAS2MAPR,
+ OAS2DOTTYPE,
+ OAS2RECV:
+ if n.Defn.Initorder != InitNotStarted {
+ break
+ }
+ n.Defn.Initorder = InitDone
+ for l = n.Defn.Rlist; l != nil; l = l.Next {
+ init1(l.N, out)
+ }
+ if Debug['%'] != 0 {
+ Dump("nonstatic", n.Defn)
+ }
+ *out = list(*out, n.Defn)
+ }
+ }
+
+ l = initlist
+ initlist = l.Next
+ if l.N != n {
+ Fatal("bad initlist")
+ }
+
+ n.Initorder = InitDone
+ return
+
+bad:
+ Dump("defn", n.Defn)
+ Fatal("init1: bad defn")
+}
+
+// recurse over n, doing init1 everywhere.
+func init2(n *Node, out **NodeList) {
+ if n == nil || n.Initorder == InitDone {
+ return
+ }
+
+ if n.Op == ONAME && n.Ninit != nil {
+ Fatal("name %v with ninit: %v\n", Sconv(n.Sym, 0), Nconv(n, obj.FmtSign))
+ }
+
+ init1(n, out)
+ init2(n.Left, out)
+ init2(n.Right, out)
+ init2(n.Ntest, out)
+ init2list(n.Ninit, out)
+ init2list(n.List, out)
+ init2list(n.Rlist, out)
+ init2list(n.Nbody, out)
+ init2list(n.Nelse, out)
+
+ if n.Op == OCLOSURE {
+ init2list(n.Closure.Nbody, out)
+ }
+ if n.Op == ODOTMETH || n.Op == OCALLPART {
+ init2(n.Type.Nname, out)
+ }
+}
+
+func init2list(l *NodeList, out **NodeList) {
+ for ; l != nil; l = l.Next {
+ init2(l.N, out)
+ }
+}
+
+func initreorder(l *NodeList, out **NodeList) {
+ var n *Node
+
+ for ; l != nil; l = l.Next {
+ n = l.N
+ switch n.Op {
+ case ODCLFUNC,
+ ODCLCONST,
+ ODCLTYPE:
+ continue
+ }
+
+ initreorder(n.Ninit, out)
+ n.Ninit = nil
+ init1(n, out)
+ }
+}
+
+// initfix computes initialization order for a list l of top-level
+// declarations and outputs the corresponding list of statements
+// to include in the init() function body.
+func initfix(l *NodeList) *NodeList {
+ var lout *NodeList
+ var lno int
+
+ lout = nil
+ lno = int(lineno)
+ initreorder(l, &lout)
+ lineno = int32(lno)
+ return lout
+}
+
+/*
+ * compilation of top-level (static) assignments
+ * into DATA statements if at all possible.
+ */
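+// For example (a sketch): a package-level "var x = 10" can be emitted
+// as static data here, while "var y = f()" cannot, and init1 appends
+// its assignment to the generated init code instead.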
+func staticinit(n *Node, out **NodeList) int {
+ var l *Node
+ var r *Node
+
+ if n.Op != ONAME || n.Class != PEXTERN || n.Defn == nil || n.Defn.Op != OAS {
+ Fatal("staticinit")
+ }
+
+ lineno = n.Lineno
+ l = n.Defn.Left
+ r = n.Defn.Right
+ return staticassign(l, r, out)
+}
+
+// like staticassign but we are copying an already
+// initialized value r.
+func staticcopy(l *Node, r *Node, out **NodeList) int {
+ var i int
+ var e *InitEntry
+ var p *InitPlan
+ var a *Node
+ var ll *Node
+ var rr *Node
+ var orig *Node
+ var n1 Node
+
+ if r.Op != ONAME || r.Class != PEXTERN || r.Sym.Pkg != localpkg {
+ return 0
+ }
+ if r.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
+ return 0
+ }
+ if r.Defn.Op != OAS {
+ return 0
+ }
+ orig = r
+ r = r.Defn.Right
+
+ switch r.Op {
+ case ONAME:
+ if staticcopy(l, r, out) != 0 {
+ return 1
+ }
+ *out = list(*out, Nod(OAS, l, r))
+ return 1
+
+ case OLITERAL:
+ if iszero(r) != 0 {
+ return 1
+ }
+ gdata(l, r, int(l.Type.Width))
+ return 1
+
+ case OADDR:
+ switch r.Left.Op {
+ case ONAME:
+ gdata(l, r, int(l.Type.Width))
+ return 1
+ }
+
+ case OPTRLIT:
+ switch r.Left.Op {
+ //dump("not static addr", r);
+ default:
+ break
+
+ // copy pointer
+ case OARRAYLIT,
+ OSTRUCTLIT,
+ OMAPLIT:
+ gdata(l, Nod(OADDR, r.Nname, nil), int(l.Type.Width))
+
+ return 1
+ }
+
+ case OARRAYLIT:
+ if Isslice(r.Type) != 0 {
+ // copy slice
+ a = r.Nname
+
+ n1 = *l
+ n1.Xoffset = l.Xoffset + int64(Array_array)
+ gdata(&n1, Nod(OADDR, a, nil), Widthptr)
+ n1.Xoffset = l.Xoffset + int64(Array_nel)
+ gdata(&n1, r.Right, Widthint)
+ n1.Xoffset = l.Xoffset + int64(Array_cap)
+ gdata(&n1, r.Right, Widthint)
+ return 1
+ }
+ fallthrough
+
+ case OSTRUCTLIT:
+ p = r.Initplan
+
+ n1 = *l
+ for i = 0; i < len(p.E); i++ {
+ e = &p.E[i]
+ n1.Xoffset = l.Xoffset + e.Xoffset
+ n1.Type = e.Expr.Type
+ if e.Expr.Op == OLITERAL {
+ gdata(&n1, e.Expr, int(n1.Type.Width))
+ } else {
+ ll = Nod(OXXX, nil, nil)
+ *ll = n1
+ ll.Orig = ll // completely separate copy
+ if !(staticassign(ll, e.Expr, out) != 0) {
+ // Requires computation, but we're
+ // copying someone else's computation.
+ rr = Nod(OXXX, nil, nil)
+
+ *rr = *orig
+ rr.Orig = rr // completely separate copy
+ rr.Type = ll.Type
+ rr.Xoffset += e.Xoffset
+ *out = list(*out, Nod(OAS, ll, rr))
+ }
+ }
+ }
+
+ return 1
+ }
+
+ return 0
+}
+
+func staticassign(l *Node, r *Node, out **NodeList) int {
+ var a *Node
+ var n1 Node
+ var nam Node
+ var ta *Type
+ var p *InitPlan
+ var e *InitEntry
+ var i int
+ var sval *Strlit
+
+ switch r.Op {
+ //dump("not static", r);
+ default:
+ break
+
+ case ONAME:
+ if r.Class == PEXTERN && r.Sym.Pkg == localpkg {
+ return staticcopy(l, r, out)
+ }
+
+ case OLITERAL:
+ if iszero(r) != 0 {
+ return 1
+ }
+ gdata(l, r, int(l.Type.Width))
+ return 1
+
+ case OADDR:
+ if stataddr(&nam, r.Left) != 0 {
+ n1 = *r
+ n1.Left = &nam
+ gdata(l, &n1, int(l.Type.Width))
+ return 1
+ }
+ fallthrough
+
+ case OPTRLIT:
+ switch r.Left.Op {
+ //dump("not static ptrlit", r);
+ default:
+ break
+
+ // Init pointer.
+ case OARRAYLIT,
+ OMAPLIT,
+ OSTRUCTLIT:
+ a = staticname(r.Left.Type, 1)
+
+ r.Nname = a
+ gdata(l, Nod(OADDR, a, nil), int(l.Type.Width))
+
+ // Init underlying literal.
+ if !(staticassign(a, r.Left, out) != 0) {
+ *out = list(*out, Nod(OAS, a, r.Left))
+ }
+ return 1
+ }
+
+ case OSTRARRAYBYTE:
+ if l.Class == PEXTERN && r.Left.Op == OLITERAL {
+ sval = r.Left.Val.U.Sval
+ slicebytes(l, sval.S, len(sval.S))
+ return 1
+ }
+
+ case OARRAYLIT:
+ initplan(r)
+ if Isslice(r.Type) != 0 {
+ // Init slice.
+ ta = typ(TARRAY)
+
+ ta.Type = r.Type.Type
+ ta.Bound = Mpgetfix(r.Right.Val.U.Xval)
+ a = staticname(ta, 1)
+ r.Nname = a
+ n1 = *l
+ n1.Xoffset = l.Xoffset + int64(Array_array)
+ gdata(&n1, Nod(OADDR, a, nil), Widthptr)
+ n1.Xoffset = l.Xoffset + int64(Array_nel)
+ gdata(&n1, r.Right, Widthint)
+ n1.Xoffset = l.Xoffset + int64(Array_cap)
+ gdata(&n1, r.Right, Widthint)
+
+ // Fall through to init underlying array.
+ l = a
+ }
+ fallthrough
+
+ case OSTRUCTLIT:
+ initplan(r)
+
+ p = r.Initplan
+ n1 = *l
+ for i = 0; i < len(p.E); i++ {
+ e = &p.E[i]
+ n1.Xoffset = l.Xoffset + e.Xoffset
+ n1.Type = e.Expr.Type
+ if e.Expr.Op == OLITERAL {
+ gdata(&n1, e.Expr, int(n1.Type.Width))
+ } else {
+ a = Nod(OXXX, nil, nil)
+ *a = n1
+ a.Orig = a // completely separate copy
+ if !(staticassign(a, e.Expr, out) != 0) {
+ *out = list(*out, Nod(OAS, a, e.Expr))
+ }
+ }
+ }
+
+ return 1
+
+ // TODO: Table-driven map insert.
+ case OMAPLIT:
+ break
+ }
+
+ return 0
+}
+
+/*
+ * from here down is the walk analysis
+ * of composite literals.
+ * most of the work is to generate
+ * data statements for the constant
+ * part of the composite literal.
+ */
+func staticname(t *Type, ctxt int) *Node {
+ var n *Node
+
+ namebuf = fmt.Sprintf("statictmp_%.4d", statuniqgen)
+ statuniqgen++
+ n = newname(Lookup(namebuf))
+ if !(ctxt != 0) {
+ n.Readonly = 1
+ }
+ addvar(n, t, PEXTERN)
+ return n
+}
+
+func isliteral(n *Node) int {
+ if n.Op == OLITERAL {
+ if n.Val.Ctype != CTNIL {
+ return 1
+ }
+ }
+ return 0
+}
+
+func simplename(n *Node) int {
+ if n.Op != ONAME {
+ goto no
+ }
+ if !(n.Addable != 0) {
+ goto no
+ }
+ if n.Class&PHEAP != 0 {
+ goto no
+ }
+ if n.Class == PPARAMREF {
+ goto no
+ }
+ return 1
+
+no:
+ return 0
+}
+
+func litas(l *Node, r *Node, init **NodeList) {
+ var a *Node
+
+ a = Nod(OAS, l, r)
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ *init = list(*init, a)
+}
+
+const (
+ MODEDYNAM = 1
+ MODECONST = 2
+)
+
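+// getdyn reports whether the composite literal n has constant parts
+// (MODECONST), dynamic parts (MODEDYNAM), or both; for example (a
+// sketch), []int{1, 2, x} yields MODECONST|MODEDYNAM.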
+func getdyn(n *Node, top int) int {
+ var nl *NodeList
+ var value *Node
+ var mode int
+
+ mode = 0
+ switch n.Op {
+ default:
+ if isliteral(n) != 0 {
+ return MODECONST
+ }
+ return MODEDYNAM
+
+ case OARRAYLIT:
+ if !(top != 0) && n.Type.Bound < 0 {
+ return MODEDYNAM
+ }
+ fallthrough
+
+ case OSTRUCTLIT:
+ break
+ }
+
+ for nl = n.List; nl != nil; nl = nl.Next {
+ value = nl.N.Right
+ mode |= getdyn(value, 0)
+ if mode == MODEDYNAM|MODECONST {
+ break
+ }
+ }
+
+ return mode
+}
+
+func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
+ var r *Node
+ var a *Node
+ var nl *NodeList
+ var index *Node
+ var value *Node
+
+ for nl = n.List; nl != nil; nl = nl.Next {
+ r = nl.N
+ if r.Op != OKEY {
+ Fatal("structlit: rhs not OKEY: %v", Nconv(r, 0))
+ }
+ index = r.Left
+ value = r.Right
+
+ switch value.Op {
+ case OARRAYLIT:
+ if value.Type.Bound < 0 {
+ if pass == 1 && ctxt != 0 {
+ a = Nod(ODOT, var_, newname(index.Sym))
+ slicelit(ctxt, value, a, init)
+ } else if pass == 2 && ctxt == 0 {
+ a = Nod(ODOT, var_, newname(index.Sym))
+ slicelit(ctxt, value, a, init)
+ } else if pass == 3 {
+ break
+ }
+ continue
+ }
+
+ a = Nod(ODOT, var_, newname(index.Sym))
+ arraylit(ctxt, pass, value, a, init)
+ continue
+
+ case OSTRUCTLIT:
+ a = Nod(ODOT, var_, newname(index.Sym))
+ structlit(ctxt, pass, value, a, init)
+ continue
+ }
+
+ if isliteral(value) != 0 {
+ if pass == 2 {
+ continue
+ }
+ } else if pass == 1 {
+ continue
+ }
+
+ // build list of var.field = expr
+ a = Nod(ODOT, var_, newname(index.Sym))
+
+ a = Nod(OAS, a, value)
+ typecheck(&a, Etop)
+ if pass == 1 {
+ walkexpr(&a, init) // add any assignments in r to top
+ if a.Op != OAS {
+ Fatal("structlit: not as")
+ }
+ a.Dodata = 2
+ } else {
+ orderstmtinplace(&a)
+ walkstmt(&a)
+ }
+
+ *init = list(*init, a)
+ }
+}
+
+func arraylit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
+ var r *Node
+ var a *Node
+ var l *NodeList
+ var index *Node
+ var value *Node
+
+ for l = n.List; l != nil; l = l.Next {
+ r = l.N
+ if r.Op != OKEY {
+ Fatal("arraylit: rhs not OKEY: %v", Nconv(r, 0))
+ }
+ index = r.Left
+ value = r.Right
+
+ switch value.Op {
+ case OARRAYLIT:
+ if value.Type.Bound < 0 {
+ if pass == 1 && ctxt != 0 {
+ a = Nod(OINDEX, var_, index)
+ slicelit(ctxt, value, a, init)
+ } else if pass == 2 && ctxt == 0 {
+ a = Nod(OINDEX, var_, index)
+ slicelit(ctxt, value, a, init)
+ } else if pass == 3 {
+ break
+ }
+ continue
+ }
+
+ a = Nod(OINDEX, var_, index)
+ arraylit(ctxt, pass, value, a, init)
+ continue
+
+ case OSTRUCTLIT:
+ a = Nod(OINDEX, var_, index)
+ structlit(ctxt, pass, value, a, init)
+ continue
+ }
+
+ if isliteral(index) != 0 && isliteral(value) != 0 {
+ if pass == 2 {
+ continue
+ }
+ } else if pass == 1 {
+ continue
+ }
+
+ // build list of var[index] = value
+ a = Nod(OINDEX, var_, index)
+
+ a = Nod(OAS, a, value)
+ typecheck(&a, Etop)
+ if pass == 1 {
+ walkexpr(&a, init)
+ if a.Op != OAS {
+ Fatal("arraylit: not as")
+ }
+ a.Dodata = 2
+ } else {
+ orderstmtinplace(&a)
+ walkstmt(&a)
+ }
+
+ *init = list(*init, a)
+ }
+}
+
+func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+ var r *Node
+ var a *Node
+ var l *NodeList
+ var t *Type
+ var vstat *Node
+ var vauto *Node
+ var index *Node
+ var value *Node
+ var mode int
+
+ // make an array type
+ t = shallow(n.Type)
+
+ t.Bound = Mpgetfix(n.Right.Val.U.Xval)
+ t.Width = 0
+ t.Sym = nil
+ t.Haspointers = 0
+ dowidth(t)
+
+ if ctxt != 0 {
+ // put everything into static array
+ vstat = staticname(t, ctxt)
+
+ arraylit(ctxt, 1, n, vstat, init)
+ arraylit(ctxt, 2, n, vstat, init)
+
+ // copy static to slice
+ a = Nod(OSLICE, vstat, Nod(OKEY, nil, nil))
+
+ a = Nod(OAS, var_, a)
+ typecheck(&a, Etop)
+ a.Dodata = 2
+ *init = list(*init, a)
+ return
+ }
+
+ // recipe for var = []t{...}
+ // 1. make a static array
+ // var vstat [...]t
+ // 2. assign (data statements) the constant part
+ // vstat = constpart{}
+ // 3. make an auto pointer to array and allocate heap to it
+ // var vauto *[...]t = new([...]t)
+ // 4. copy the static array to the auto array
+ // *vauto = vstat
+ // 5. assign slice of allocated heap to var
+ // var = [0:]*auto
+ // 6. for each dynamic part assign to the slice
+ // var[i] = dynamic part
+ //
+ // an optimization is done if there is no constant part
+ // 3. var vauto *[...]t = new([...]t)
+ // 5. var = [0:]*auto
+ // 6. var[i] = dynamic part
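+	//
+	// as a concrete sketch, var = []byte{1, 2, x} becomes roughly:
+	//	var vstat [3]byte
+	//	vstat = [3]byte{1, 2, 0}	(data statements, steps 1-2)
+	//	var vauto *[3]byte = new([3]byte)
+	//	*vauto = vstat
+	//	var = vauto[0:]
+	//	var[2] = x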
+
+ // if the literal contains constants,
+ // make static initialized array (1),(2)
+ vstat = nil
+
+ mode = getdyn(n, 1)
+ if mode&MODECONST != 0 {
+ vstat = staticname(t, ctxt)
+ arraylit(ctxt, 1, n, vstat, init)
+ }
+
+ // make new auto *array (3 declare)
+ vauto = temp(Ptrto(t))
+
+ // set auto to point at new temp or heap (3 assign)
+ if n.Alloc != nil {
+ // temp allocated during order.c for dddarg
+ n.Alloc.Type = t
+
+ if vstat == nil {
+ a = Nod(OAS, n.Alloc, nil)
+ typecheck(&a, Etop)
+ *init = list(*init, a) // zero new temp
+ }
+
+ a = Nod(OADDR, n.Alloc, nil)
+ } else if n.Esc == EscNone {
+ a = temp(t)
+ if vstat == nil {
+ a = Nod(OAS, temp(t), nil)
+ typecheck(&a, Etop)
+ *init = list(*init, a) // zero new temp
+ a = a.Left
+ }
+
+ a = Nod(OADDR, a, nil)
+ } else {
+ a = Nod(ONEW, nil, nil)
+ a.List = list1(typenod(t))
+ }
+
+ a = Nod(OAS, vauto, a)
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ *init = list(*init, a)
+
+ if vstat != nil {
+ // copy static to heap (4)
+ a = Nod(OIND, vauto, nil)
+
+ a = Nod(OAS, a, vstat)
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ *init = list(*init, a)
+ }
+
+ // make slice out of heap (5)
+ a = Nod(OAS, var_, Nod(OSLICE, vauto, Nod(OKEY, nil, nil)))
+
+ typecheck(&a, Etop)
+ orderstmtinplace(&a)
+ walkstmt(&a)
+ *init = list(*init, a)
+
+ // put dynamics into slice (6)
+ for l = n.List; l != nil; l = l.Next {
+ r = l.N
+ if r.Op != OKEY {
+ Fatal("slicelit: rhs not OKEY: %v", Nconv(r, 0))
+ }
+ index = r.Left
+ value = r.Right
+ a = Nod(OINDEX, var_, index)
+ a.Bounded = 1
+
+ // TODO need to check bounds?
+
+ switch value.Op {
+ case OARRAYLIT:
+ if value.Type.Bound < 0 {
+ break
+ }
+ arraylit(ctxt, 2, value, a, init)
+ continue
+
+ case OSTRUCTLIT:
+ structlit(ctxt, 2, value, a, init)
+ continue
+ }
+
+ if isliteral(index) != 0 && isliteral(value) != 0 {
+ continue
+ }
+
+ // build list of var[c] = expr
+ a = Nod(OAS, a, value)
+
+ typecheck(&a, Etop)
+ orderstmtinplace(&a)
+ walkstmt(&a)
+ *init = list(*init, a)
+ }
+}
+
+func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+ var r *Node
+ var a *Node
+ var l *NodeList
+ var nerr int
+ var b int64
+ var t *Type
+ var tk *Type
+ var tv *Type
+ var t1 *Type
+ var vstat *Node
+ var index *Node
+ var value *Node
+ var key *Node
+ var val *Node
+ var syma *Sym
+ var symb *Sym
+
+ ctxt = 0
+
+ // make the map var
+ nerr = nerrors
+
+ a = Nod(OMAKE, nil, nil)
+ a.List = list1(typenod(n.Type))
+ litas(var_, a, init)
+
+ // count the initializers
+ b = 0
+
+ for l = n.List; l != nil; l = l.Next {
+ r = l.N
+
+ if r.Op != OKEY {
+ Fatal("maplit: rhs not OKEY: %v", Nconv(r, 0))
+ }
+ index = r.Left
+ value = r.Right
+
+ if isliteral(index) != 0 && isliteral(value) != 0 {
+ b++
+ }
+ }
+
+ if b != 0 {
+ // build type [count]struct { a Tindex, b Tvalue }
+ t = n.Type
+
+ tk = t.Down
+ tv = t.Type
+
+ symb = Lookup("b")
+ t = typ(TFIELD)
+ t.Type = tv
+ t.Sym = symb
+
+ syma = Lookup("a")
+ t1 = t
+ t = typ(TFIELD)
+ t.Type = tk
+ t.Sym = syma
+ t.Down = t1
+
+ t1 = t
+ t = typ(TSTRUCT)
+ t.Type = t1
+
+ t1 = t
+ t = typ(TARRAY)
+ t.Bound = b
+ t.Type = t1
+
+ dowidth(t)
+
+ // make and initialize static array
+ vstat = staticname(t, ctxt)
+
+ b = 0
+ for l = n.List; l != nil; l = l.Next {
+ r = l.N
+
+ if r.Op != OKEY {
+ Fatal("maplit: rhs not OKEY: %v", Nconv(r, 0))
+ }
+ index = r.Left
+ value = r.Right
+
+ if isliteral(index) != 0 && isliteral(value) != 0 {
+ // build vstat[b].a = key;
+ a = Nodintconst(b)
+
+ a = Nod(OINDEX, vstat, a)
+ a = Nod(ODOT, a, newname(syma))
+ a = Nod(OAS, a, index)
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ a.Dodata = 2
+ *init = list(*init, a)
+
+ // build vstat[b].b = value;
+ a = Nodintconst(b)
+
+ a = Nod(OINDEX, vstat, a)
+ a = Nod(ODOT, a, newname(symb))
+ a = Nod(OAS, a, value)
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ a.Dodata = 2
+ *init = list(*init, a)
+
+ b++
+ }
+ }
+
+ // loop adding structure elements to map
+ // for i = 0; i < len(vstat); i++ {
+ // map[vstat[i].a] = vstat[i].b
+ // }
+ index = temp(Types[TINT])
+
+ a = Nod(OINDEX, vstat, index)
+ a.Bounded = 1
+ a = Nod(ODOT, a, newname(symb))
+
+ r = Nod(OINDEX, vstat, index)
+ r.Bounded = 1
+ r = Nod(ODOT, r, newname(syma))
+ r = Nod(OINDEX, var_, r)
+
+ r = Nod(OAS, r, a)
+
+ a = Nod(OFOR, nil, nil)
+ a.Nbody = list1(r)
+
+ a.Ninit = list1(Nod(OAS, index, Nodintconst(0)))
+ a.Ntest = Nod(OLT, index, Nodintconst(t.Bound))
+ a.Nincr = Nod(OAS, index, Nod(OADD, index, Nodintconst(1)))
+
+ typecheck(&a, Etop)
+ walkstmt(&a)
+ *init = list(*init, a)
+ }
+
+ // put in dynamic entries one-at-a-time
+ key = nil
+
+ val = nil
+ for l = n.List; l != nil; l = l.Next {
+ r = l.N
+
+ if r.Op != OKEY {
+ Fatal("maplit: rhs not OKEY: %v", Nconv(r, 0))
+ }
+ index = r.Left
+ value = r.Right
+
+ if isliteral(index) != 0 && isliteral(value) != 0 {
+ continue
+ }
+
+ // build list of var[c] = expr.
+ // use temporary so that mapassign1 can have addressable key, val.
+ if key == nil {
+ key = temp(var_.Type.Down)
+ val = temp(var_.Type.Type)
+ }
+
+ a = Nod(OAS, key, r.Left)
+ typecheck(&a, Etop)
+ walkstmt(&a)
+ *init = list(*init, a)
+ a = Nod(OAS, val, r.Right)
+ typecheck(&a, Etop)
+ walkstmt(&a)
+ *init = list(*init, a)
+
+ a = Nod(OAS, Nod(OINDEX, var_, key), val)
+ typecheck(&a, Etop)
+ walkstmt(&a)
+ *init = list(*init, a)
+
+ if nerr != nerrors {
+ break
+ }
+ }
+
+ if key != nil {
+ a = Nod(OVARKILL, key, nil)
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+ a = Nod(OVARKILL, val, nil)
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+ }
+}
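+
+// For example (a sketch): map[string]int{"a": 1, "b": 2, "c": f()}
+// stores the two constant pairs in a static [2]struct{ a string; b int },
+// loops over that array inserting into the map, and then assigns the
+// f() entry through the key/val temporaries.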
+
+func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+ var t *Type
+ var a *Node
+ var vstat *Node
+ var r *Node
+
+ t = n.Type
+ switch n.Op {
+ default:
+ Fatal("anylit: not lit")
+ fallthrough
+
+ case OPTRLIT:
+ if !(Isptr[t.Etype] != 0) {
+ Fatal("anylit: not ptr")
+ }
+
+ if n.Right != nil {
+ r = Nod(OADDR, n.Right, nil)
+ typecheck(&r, Erv)
+ } else {
+ r = Nod(ONEW, nil, nil)
+ r.Typecheck = 1
+ r.Type = t
+ r.Esc = n.Esc
+ }
+
+ walkexpr(&r, init)
+ a = Nod(OAS, var_, r)
+
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+
+ var_ = Nod(OIND, var_, nil)
+ typecheck(&var_, Erv|Easgn)
+ anylit(ctxt, n.Left, var_, init)
+
+ case OSTRUCTLIT:
+ if t.Etype != TSTRUCT {
+ Fatal("anylit: not struct")
+ }
+
+ if simplename(var_) != 0 && count(n.List) > 4 {
+ if ctxt == 0 {
+ // lay out static data
+ vstat = staticname(t, ctxt)
+
+ structlit(ctxt, 1, n, vstat, init)
+
+ // copy static to var
+ a = Nod(OAS, var_, vstat)
+
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ *init = list(*init, a)
+
+ // add expressions to automatic
+ structlit(ctxt, 2, n, var_, init)
+
+ break
+ }
+
+ structlit(ctxt, 1, n, var_, init)
+ structlit(ctxt, 2, n, var_, init)
+ break
+ }
+
+	// initialization is not completely specified: zero the destination first
+ if simplename(var_) != 0 || count(n.List) < structcount(t) {
+ a = Nod(OAS, var_, nil)
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ *init = list(*init, a)
+ }
+
+ structlit(ctxt, 3, n, var_, init)
+
+ case OARRAYLIT:
+ if t.Etype != TARRAY {
+ Fatal("anylit: not array")
+ }
+ if t.Bound < 0 {
+ slicelit(ctxt, n, var_, init)
+ break
+ }
+
+ if simplename(var_) != 0 && count(n.List) > 4 {
+ if ctxt == 0 {
+ // lay out static data
+ vstat = staticname(t, ctxt)
+
+ arraylit(1, 1, n, vstat, init)
+
+ // copy static to automatic
+ a = Nod(OAS, var_, vstat)
+
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ *init = list(*init, a)
+
+ // add expressions to automatic
+ arraylit(ctxt, 2, n, var_, init)
+
+ break
+ }
+
+ arraylit(ctxt, 1, n, var_, init)
+ arraylit(ctxt, 2, n, var_, init)
+ break
+ }
+
+	// initialization is not completely specified: zero the destination first
+ if simplename(var_) != 0 || int64(count(n.List)) < t.Bound {
+ a = Nod(OAS, var_, nil)
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ *init = list(*init, a)
+ }
+
+ arraylit(ctxt, 3, n, var_, init)
+
+ case OMAPLIT:
+ if t.Etype != TMAP {
+ Fatal("anylit: not map")
+ }
+ maplit(ctxt, n, var_, init)
+ }
+}
+
+func oaslit(n *Node, init **NodeList) int {
+ var ctxt int
+
+ if n.Left == nil || n.Right == nil {
+ goto no
+ }
+ if n.Left.Type == nil || n.Right.Type == nil {
+ goto no
+ }
+ if !(simplename(n.Left) != 0) {
+ goto no
+ }
+ if !Eqtype(n.Left.Type, n.Right.Type) {
+ goto no
+ }
+
+ // context is init() function.
+ // implies generated data executed
+ // exactly once and not subject to races.
+ ctxt = 0
+
+ // if(n->dodata == 1)
+ // ctxt = 1;
+
+ switch n.Right.Op {
+ default:
+ goto no
+
+ case OSTRUCTLIT,
+ OARRAYLIT,
+ OMAPLIT:
+ if vmatch1(n.Left, n.Right) != 0 {
+ goto no
+ }
+ anylit(ctxt, n.Right, n.Left, init)
+ }
+
+ n.Op = OEMPTY
+ return 1
+
+	// not a special composite literal assignment
+no:
+ return 0
+}
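+
+// For example (a sketch): x = T{a: 1, b: 2} with x a simple name is
+// handled entirely by anylit and the assignment itself is rewritten to
+// OEMPTY, while x = T{a: x.a} fails the vmatch1 check because the
+// right-hand side mentions x.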
+
+func getlit(lit *Node) int {
+ if Smallintconst(lit) != 0 {
+ return int(Mpgetfix(lit.Val.U.Xval))
+ }
+ return -1
+}
+
+func stataddr(nam *Node, n *Node) int {
+ var l int
+
+ if n == nil {
+ goto no
+ }
+
+ switch n.Op {
+ case ONAME:
+ *nam = *n
+ return int(n.Addable)
+
+ case ODOT:
+ if !(stataddr(nam, n.Left) != 0) {
+ break
+ }
+ nam.Xoffset += n.Xoffset
+ nam.Type = n.Type
+ return 1
+
+ case OINDEX:
+ if n.Left.Type.Bound < 0 {
+ break
+ }
+ if !(stataddr(nam, n.Left) != 0) {
+ break
+ }
+ l = getlit(n.Right)
+ if l < 0 {
+ break
+ }
+
+ // Check for overflow.
+ if n.Type.Width != 0 && Thearch.MAXWIDTH/n.Type.Width <= int64(l) {
+ break
+ }
+ nam.Xoffset += int64(l) * n.Type.Width
+ nam.Type = n.Type
+ return 1
+ }
+
+no:
+ return 0
+}
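+
+// For example (a sketch): for a global var g struct{ a [4]int32 },
+// stataddr flattens the address g.a[3] to the symbol g at Xoffset
+// 0 + 3*4 = 12, which lets gen_as_init turn a constant store there
+// into a data statement.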
+
+func initplan(n *Node) {
+ var p *InitPlan
+ var a *Node
+ var l *NodeList
+
+ if n.Initplan != nil {
+ return
+ }
+ p = new(InitPlan)
+ n.Initplan = p
+ switch n.Op {
+ default:
+ Fatal("initplan")
+ fallthrough
+
+ case OARRAYLIT:
+ for l = n.List; l != nil; l = l.Next {
+ a = l.N
+ if a.Op != OKEY || !(Smallintconst(a.Left) != 0) {
+ Fatal("initplan arraylit")
+ }
+ addvalue(p, n.Type.Type.Width*Mpgetfix(a.Left.Val.U.Xval), nil, a.Right)
+ }
+
+ case OSTRUCTLIT:
+ for l = n.List; l != nil; l = l.Next {
+ a = l.N
+ if a.Op != OKEY || a.Left.Type == nil {
+ Fatal("initplan structlit")
+ }
+ addvalue(p, a.Left.Type.Width, nil, a.Right)
+ }
+
+ case OMAPLIT:
+ for l = n.List; l != nil; l = l.Next {
+ a = l.N
+ if a.Op != OKEY {
+ Fatal("initplan maplit")
+ }
+ addvalue(p, -1, a.Left, a.Right)
+ }
+ }
+}
+
+func addvalue(p *InitPlan, xoffset int64, key *Node, n *Node) {
+ var i int
+ var q *InitPlan
+ var e *InitEntry
+
+ // special case: zero can be dropped entirely
+ if iszero(n) != 0 {
+ p.Zero += n.Type.Width
+ return
+ }
+
+ // special case: inline struct and array (not slice) literals
+ if isvaluelit(n) != 0 {
+ initplan(n)
+ q = n.Initplan
+ for i = 0; i < len(q.E); i++ {
+ e = entry(p)
+ *e = q.E[i]
+ e.Xoffset += xoffset
+ }
+
+ return
+ }
+
+ // add to plan
+ if n.Op == OLITERAL {
+ p.Lit += n.Type.Width
+ } else {
+ p.Expr += n.Type.Width
+ }
+
+ e = entry(p)
+ e.Xoffset = xoffset
+ e.Expr = n
+}
+
+func iszero(n *Node) int {
+ var l *NodeList
+
+ switch n.Op {
+ case OLITERAL:
+ switch n.Val.Ctype {
+ default:
+ Dump("unexpected literal", n)
+ Fatal("iszero")
+ fallthrough
+
+ case CTNIL:
+ return 1
+
+ case CTSTR:
+ return bool2int(n.Val.U.Sval == nil || len(n.Val.U.Sval.S) == 0)
+
+ case CTBOOL:
+ return bool2int(n.Val.U.Bval == 0)
+
+ case CTINT,
+ CTRUNE:
+ return bool2int(mpcmpfixc(n.Val.U.Xval, 0) == 0)
+
+ case CTFLT:
+ return bool2int(mpcmpfltc(n.Val.U.Fval, 0) == 0)
+
+ case CTCPLX:
+ return bool2int(mpcmpfltc(&n.Val.U.Cval.Real, 0) == 0 && mpcmpfltc(&n.Val.U.Cval.Imag, 0) == 0)
+ }
+
+ case OARRAYLIT:
+ if Isslice(n.Type) != 0 {
+ break
+ }
+ fallthrough
+ case OSTRUCTLIT:
+ for l = n.List; l != nil; l = l.Next {
+ if !(iszero(l.N.Right) != 0) {
+ return 0
+ }
+ }
+ return 1
+ }
+
+ return 0
+}
+
+func isvaluelit(n *Node) int {
+ return bool2int((n.Op == OARRAYLIT && Isfixedarray(n.Type) != 0) || n.Op == OSTRUCTLIT)
+}
+
+func entry(p *InitPlan) *InitEntry {
+ p.E = append(p.E, InitEntry{})
+ return &p.E[len(p.E)-1]
+}
+
+func gen_as_init(n *Node) int {
+ var nr *Node
+ var nl *Node
+ var nam Node
+ var nod1 Node
+
+ if n.Dodata == 0 {
+ goto no
+ }
+
+ nr = n.Right
+ nl = n.Left
+ if nr == nil {
+ if !(stataddr(&nam, nl) != 0) {
+ goto no
+ }
+ if nam.Class != PEXTERN {
+ goto no
+ }
+ goto yes
+ }
+
+ if nr.Type == nil || !Eqtype(nl.Type, nr.Type) {
+ goto no
+ }
+
+ if !(stataddr(&nam, nl) != 0) {
+ goto no
+ }
+
+ if nam.Class != PEXTERN {
+ goto no
+ }
+
+ switch nr.Op {
+ default:
+ goto no
+
+ case OCONVNOP:
+ nr = nr.Left
+ if nr == nil || nr.Op != OSLICEARR {
+ goto no
+ }
+ fallthrough
+ case OSLICEARR:
+ if nr.Right.Op == OKEY && nr.Right.Left == nil && nr.Right.Right == nil {
+ nr = nr.Left
+ goto slice
+ }
+
+ goto no
+
+ case OLITERAL:
+ break
+ }
+
+ switch nr.Type.Etype {
+ default:
+ goto no
+
+ case TBOOL,
+ TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TINT,
+ TUINT,
+ TUINTPTR,
+ TPTR32,
+ TPTR64,
+ TFLOAT32,
+ TFLOAT64:
+ gdata(&nam, nr, int(nr.Type.Width))
+
+ case TCOMPLEX64,
+ TCOMPLEX128:
+ gdatacomplex(&nam, nr.Val.U.Cval)
+
+ case TSTRING:
+ gdatastring(&nam, nr.Val.U.Sval)
+ }
+
+yes:
+ return 1
+
+slice:
+ gused(nil) // in case the data is the dest of a goto
+ nl = nr
+ if nr == nil || nr.Op != OADDR {
+ goto no
+ }
+ nr = nr.Left
+ if nr == nil || nr.Op != ONAME {
+ goto no
+ }
+
+ // nr is the array being converted to a slice
+ if nr.Type == nil || nr.Type.Etype != TARRAY || nr.Type.Bound < 0 {
+ goto no
+ }
+
+ nam.Xoffset += int64(Array_array)
+ gdata(&nam, nl, int(Types[Tptr].Width))
+
+ nam.Xoffset += int64(Array_nel) - int64(Array_array)
+ Nodconst(&nod1, Types[TINT], nr.Type.Bound)
+ gdata(&nam, &nod1, Widthint)
+
+ nam.Xoffset += int64(Array_cap) - int64(Array_nel)
+ gdata(&nam, &nod1, Widthint)
+
+ goto yes
+
+no:
+ if n.Dodata == 2 {
+ Dump("\ngen_as_init", n)
+		Fatal("gen_as_init couldn't make data statement")
+ }
+
+ return 0
+}
diff --git a/src/cmd/internal/gc/subr.go b/src/cmd/internal/gc/subr.go
new file mode 100644
index 0000000000..c28bfbdaa7
--- /dev/null
+++ b/src/cmd/internal/gc/subr.go
@@ -0,0 +1,3932 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type Error struct {
+ lineno int
+ seq int
+ msg string
+}
+
+var errors []Error
+
+var nerr int
+
+var merr int
+
+func errorexit() {
+ Flusherrors()
+ if outfile != "" {
+ os.Remove(outfile)
+ }
+ os.Exit(2)
+}
+
+func parserline() int {
+ if yychar_subr != 0 && yychar_subr != -2 { // parser has one symbol lookahead
+ return int(prevlineno)
+ }
+ return int(lineno)
+}
+
+func adderrorname(n *Node) {
+ var old string
+
+ if n.Op != ODOT {
+ return
+ }
+ old = fmt.Sprintf("%v: undefined: %v\n", n.Line(), Nconv(n.Left, 0))
+ if len(errors) > 0 && int32(errors[len(errors)-1].lineno) == n.Lineno && errors[len(errors)-1].msg == old {
+ errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), Nconv(n.Left, 0), Nconv(n, 0))
+ }
+}
+
+func adderr(line int, format string, args []interface{}) {
+ errors = append(errors, Error{
+ seq: len(errors),
+ lineno: line,
+ msg: fmt.Sprintf("%v: %s\n", Ctxt.Line(line), fmt.Sprintf(format, args...)),
+ })
+}
+
+type errcmp []Error
+
+func (x errcmp) Len() int {
+ return len(x)
+}
+
+func (x errcmp) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+func (x errcmp) Less(i, j int) bool {
+ var a *Error
+ var b *Error
+
+ a = &x[i]
+ b = &x[j]
+ if a.lineno != b.lineno {
+ return a.lineno-b.lineno < 0
+ }
+ if a.seq != b.seq {
+ return a.seq-b.seq < 0
+ }
+ return stringsCompare(a.msg, b.msg) < 0
+}
+
+func Flusherrors() {
+ var i int
+
+ obj.Bflush(&bstdout)
+ if len(errors) == 0 {
+ return
+ }
+	sort.Sort(errcmp(errors))
+ for i = 0; i < len(errors); i++ {
+ if i == 0 || errors[i].msg != errors[i-1].msg {
+ fmt.Printf("%s", errors[i].msg)
+ }
+ }
+ errors = errors[:0]
+}
+
+func hcrash() {
+ if Debug['h'] != 0 {
+ Flusherrors()
+ if outfile != "" {
+ os.Remove(outfile)
+ }
+ var x *int
+ *x = 0
+ }
+}
+
+func yyerrorl(line int, fmt_ string, args ...interface{}) {
+ adderr(line, fmt_, args)
+
+ hcrash()
+ nerrors++
+ if nsavederrors+nerrors >= 10 && !(Debug['e'] != 0) {
+ Flusherrors()
+ fmt.Printf("%v: too many errors\n", Ctxt.Line(line))
+ errorexit()
+ }
+}
+
+var yystate int
+
+var yychar_subr int
+
+var yyerror_lastsyntax int
+
+func Yyerror(fmt_ string, args ...interface{}) {
+ var i int
+
+ if strings.HasPrefix(fmt_, "syntax error") {
+ nsyntaxerrors++
+
+ if Debug['x'] != 0 {
+ fmt.Printf("yyerror: yystate=%d yychar=%d\n", yystate, yychar_subr)
+ }
+
+ // An unexpected EOF caused a syntax error. Use the previous
+ // line number since getc generated a fake newline character.
+ if curio.eofnl != 0 {
+ lexlineno = prevlineno
+ }
+
+ // only one syntax error per line
+ if int32(yyerror_lastsyntax) == lexlineno {
+ return
+ }
+ yyerror_lastsyntax = int(lexlineno)
+
+ if strings.Contains(fmt_, "{ or {") || strings.Contains(fmt_, " or ?") || strings.Contains(fmt_, " or @") {
+ // The grammar has { and LBRACE but both show up as {.
+ // Rewrite syntax error referring to "{ or {" to say just "{".
+ // The grammar has ? and @ but only for reading imports.
+ // Silence them in ordinary errors.
+ fmt_ = strings.Replace(fmt_, "{ or {", "{", -1)
+ fmt_ = strings.Replace(fmt_, " or ?", "", -1)
+ fmt_ = strings.Replace(fmt_, " or @", "", -1)
+ }
+
+ // look for parse state-specific errors in list (see go.errors).
+ for i = 0; i < len(yymsg); i++ {
+ if yymsg[i].yystate == yystate && yymsg[i].yychar == yychar_subr {
+ yyerrorl(int(lexlineno), "syntax error: %s", yymsg[i].msg)
+ return
+ }
+ }
+
+ // plain "syntax error" gets "near foo" added
+ if fmt_ == "syntax error" {
+ yyerrorl(int(lexlineno), "syntax error near %s", lexbuf.String())
+ return
+ }
+
+		// if bison says "syntax error, more info", print "syntax error: more info".
+ if fmt_[12] == ',' {
+ yyerrorl(int(lexlineno), "syntax error:%s", fmt_[13:])
+ return
+ }
+
+ yyerrorl(int(lexlineno), "%s", fmt_)
+ return
+ }
+
+ adderr(parserline(), fmt_, args)
+
+ hcrash()
+ nerrors++
+ if nsavederrors+nerrors >= 10 && !(Debug['e'] != 0) {
+ Flusherrors()
+ fmt.Printf("%v: too many errors\n", Ctxt.Line(parserline()))
+ errorexit()
+ }
+}
+
+func Warn(fmt_ string, args ...interface{}) {
+
+ adderr(parserline(), fmt_, args)
+
+ hcrash()
+}
+
+func Warnl(line int, fmt_ string, args ...interface{}) {
+ adderr(line, fmt_, args)
+ if Debug['m'] != 0 {
+ Flusherrors()
+ }
+}
+
+func Fatal(fmt_ string, args ...interface{}) {
+
+ Flusherrors()
+
+ fmt.Printf("%v: internal compiler error: ", Ctxt.Line(int(lineno)))
+ fmt.Printf(fmt_, args...)
+ fmt.Printf("\n")
+
+ // If this is a released compiler version, ask for a bug report.
+ if strings.HasPrefix(obj.Getgoversion(), "release") {
+ fmt.Printf("\n")
+ fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
+ fmt.Printf("https://golang.org/issue/new\n")
+ }
+
+ hcrash()
+ errorexit()
+}
+
+func linehist(file string, off int32, relative int) {
+ if Debug['i'] != 0 {
+ if file != "" {
+ if off < 0 {
+ fmt.Printf("pragma %s", file)
+ } else if off > 0 {
+ fmt.Printf("line %s", file)
+ } else {
+ fmt.Printf("import %s", file)
+ }
+ } else {
+ fmt.Printf("end of import")
+ }
+ fmt.Printf(" at line %v\n", Ctxt.Line(int(lexlineno)))
+ }
+
+ if off < 0 && file[0] != '/' && !(relative != 0) {
+ file = fmt.Sprintf("%s/%s", Ctxt.Pathname, file)
+ }
+ obj.Linklinehist(Ctxt, int(lexlineno), file, int(off))
+}
+
+func setlineno(n *Node) int32 {
+ var lno int32
+
+ lno = lineno
+ if n != nil {
+ switch n.Op {
+ case ONAME,
+ OTYPE,
+ OPACK,
+ OLITERAL:
+ break
+
+ default:
+ lineno = n.Lineno
+ if lineno == 0 {
+ if Debug['K'] != 0 {
+ Warn("setlineno: line 0")
+ }
+ lineno = lno
+ }
+ }
+ }
+
+ return lno
+}
+
+func stringhash(p string) uint32 {
+ var h uint32
+ var c int
+
+ h = 0
+ for {
+ c, p = intstarstringplusplus(p)
+ if c == 0 {
+ break
+ }
+ h = h*PRIME1 + uint32(c)
+ }
+
+ if int32(h) < 0 {
+ h = -h
+ if int32(h) < 0 {
+ h = 0
+ }
+ }
+
+ return h
+}
+
+func Lookup(name string) *Sym {
+ return Pkglookup(name, localpkg)
+}
+
+func Pkglookup(name string, pkg *Pkg) *Sym {
+ var s *Sym
+ var h uint32
+ var c int
+
+ h = stringhash(name) % NHASH
+ c = int(name[0])
+ for s = hash[h]; s != nil; s = s.Link {
+ if int(s.Name[0]) != c || s.Pkg != pkg {
+ continue
+ }
+ if s.Name == name {
+ return s
+ }
+ }
+
+ s = new(Sym)
+ s.Name = name
+
+ s.Pkg = pkg
+
+ s.Link = hash[h]
+ hash[h] = s
+ s.Lexical = LNAME
+
+ return s
+}
+
+func restrictlookup(name string, pkg *Pkg) *Sym {
+ if !exportname(name) && pkg != localpkg {
+ Yyerror("cannot refer to unexported name %s.%s", pkg.Name, name)
+ }
+ return Pkglookup(name, pkg)
+}
+
+// find all the exported symbols in package opkg
+// and make them available in the current package
+func importdot(opkg *Pkg, pack *Node) {
+ var s *Sym
+ var s1 *Sym
+ var h uint32
+ var n int
+ var pkgerror string
+
+ n = 0
+ for h = 0; h < NHASH; h++ {
+ for s = hash[h]; s != nil; s = s.Link {
+ if s.Pkg != opkg {
+ continue
+ }
+ if s.Def == nil {
+ continue
+ }
+ if !exportname(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
+ continue
+ }
+ s1 = Lookup(s.Name)
+ if s1.Def != nil {
+ pkgerror = fmt.Sprintf("during import \"%v\"", Zconv(opkg.Path, 0))
+ redeclare(s1, pkgerror)
+ continue
+ }
+
+ s1.Def = s.Def
+ s1.Block = s.Block
+ s1.Def.Pack = pack
+ s1.Origpkg = opkg
+ n++
+ }
+ }
+
+ if n == 0 {
+ // can't possibly be used - there were no symbols
+ yyerrorl(int(pack.Lineno), "imported and not used: \"%v\"", Zconv(opkg.Path, 0))
+ }
+}
+
+func Nod(op int, nleft *Node, nright *Node) *Node {
+ var n *Node
+
+ n = new(Node)
+ n.Op = uint8(op)
+ n.Left = nleft
+ n.Right = nright
+ n.Lineno = int32(parserline())
+ n.Xoffset = BADWIDTH
+ n.Orig = n
+ n.Curfn = Curfn
+ return n
+}
+
+func saveorignode(n *Node) {
+ var norig *Node
+
+ if n.Orig != nil {
+ return
+ }
+ norig = Nod(int(n.Op), nil, nil)
+ *norig = *n
+ n.Orig = norig
+}
+
+// ispaddedfield reports whether the given field
+// is followed by padding. For the case where t is
+// the last field, total gives the size of the enclosing struct.
+func ispaddedfield(t *Type, total int64) int {
+ if t.Etype != TFIELD {
+ Fatal("ispaddedfield called non-field %v", Tconv(t, 0))
+ }
+ if t.Down == nil {
+ return bool2int(t.Width+t.Type.Width != total)
+ }
+ return bool2int(t.Width+t.Type.Width != t.Down.Width)
+}
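+
+// For example (a sketch): in struct{ a int8; b int64 } on a 64-bit
+// target, field a ends at offset 1 but b starts at offset 8, so
+// ispaddedfield reports 1 for a; such a struct cannot be compared
+// with plain memory equality.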
+
+func algtype1(t *Type, bad **Type) int {
+ var a int
+ var ret int
+ var t1 *Type
+
+ if bad != nil {
+ *bad = nil
+ }
+ if t.Broke != 0 {
+ return AMEM
+ }
+ if t.Noalg != 0 {
+ return ANOEQ
+ }
+
+ switch t.Etype {
+ // will be defined later.
+ case TANY,
+ TFORW:
+		if bad != nil {
+			*bad = t
+		}
+
+		return -1
+
+ case TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TINT,
+ TUINT,
+ TUINTPTR,
+ TBOOL,
+ TPTR32,
+ TPTR64,
+ TCHAN,
+ TUNSAFEPTR:
+ return AMEM
+
+ case TFUNC,
+ TMAP:
+ if bad != nil {
+ *bad = t
+ }
+ return ANOEQ
+
+ case TFLOAT32:
+ return AFLOAT32
+
+ case TFLOAT64:
+ return AFLOAT64
+
+ case TCOMPLEX64:
+ return ACPLX64
+
+ case TCOMPLEX128:
+ return ACPLX128
+
+ case TSTRING:
+ return ASTRING
+
+ case TINTER:
+ if isnilinter(t) != 0 {
+ return ANILINTER
+ }
+ return AINTER
+
+ case TARRAY:
+ if Isslice(t) != 0 {
+ if bad != nil {
+ *bad = t
+ }
+ return ANOEQ
+ }
+
+ a = algtype1(t.Type, bad)
+ if a == ANOEQ || a == AMEM {
+ if a == ANOEQ && bad != nil {
+ *bad = t
+ }
+ return a
+ }
+
+ return -1 // needs special compare
+
+ case TSTRUCT:
+ if t.Type != nil && t.Type.Down == nil && !isblanksym(t.Type.Sym) {
+ // One-field struct is same as that one field alone.
+ return algtype1(t.Type.Type, bad)
+ }
+
+ ret = AMEM
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ // All fields must be comparable.
+ a = algtype1(t1.Type, bad)
+
+ if a == ANOEQ {
+ return ANOEQ
+ }
+
+ // Blank fields, padded fields, fields with non-memory
+ // equality need special compare.
+ if a != AMEM || isblanksym(t1.Sym) || ispaddedfield(t1, t.Width) != 0 {
+ ret = -1
+ continue
+ }
+ }
+
+ return ret
+ }
+
+ Fatal("algtype1: unexpected type %v", Tconv(t, 0))
+ return 0
+}
+
+func algtype(t *Type) int {
+ var a int
+
+ a = algtype1(t, nil)
+ if a == AMEM || a == ANOEQ {
+ if Isslice(t) != 0 {
+ return ASLICE
+ }
+ switch t.Width {
+ case 0:
+ return a + AMEM0 - AMEM
+
+ case 1:
+ return a + AMEM8 - AMEM
+
+ case 2:
+ return a + AMEM16 - AMEM
+
+ case 4:
+ return a + AMEM32 - AMEM
+
+ case 8:
+ return a + AMEM64 - AMEM
+
+ case 16:
+ return a + AMEM128 - AMEM
+ }
+ }
+
+ return a
+}
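+
+// For example (a sketch): algtype on a 4-byte comparable type yields
+// AMEM32 and on a slice yields ASLICE, selecting sized variants of the
+// runtime hash/equality algorithms.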
+
+func maptype(key *Type, val *Type) *Type {
+ var t *Type
+ var bad *Type
+ var atype int
+ var mtype int
+
+ if key != nil {
+ atype = algtype1(key, &bad)
+ if bad == nil {
+ mtype = int(key.Etype)
+ } else {
+ mtype = int(bad.Etype)
+ }
+ switch mtype {
+ default:
+ if atype == ANOEQ {
+ Yyerror("invalid map key type %v", Tconv(key, 0))
+ }
+
+ // will be resolved later.
+ case TANY:
+ break
+
+ // map[key] used during definition of key.
+ // postpone check until key is fully defined.
+ // if there are multiple uses of map[key]
+ // before key is fully defined, the error
+ // will only be printed for the first one.
+ // good enough.
+ case TFORW:
+ if key.Maplineno == 0 {
+ key.Maplineno = lineno
+ }
+ }
+ }
+
+ t = typ(TMAP)
+ t.Down = key
+ t.Type = val
+ return t
+}
+
+func typ(et int) *Type {
+ var t *Type
+
+ t = new(Type)
+ t.Etype = uint8(et)
+ t.Width = BADWIDTH
+ t.Lineno = int(lineno)
+ t.Orig = t
+ return t
+}
+
+type methcmp []*Type
+
+func (x methcmp) Len() int {
+ return len(x)
+}
+
+func (x methcmp) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+func (x methcmp) Less(i, j int) bool {
+ var a *Type
+ var b *Type
+ var k int
+
+ a = x[i]
+ b = x[j]
+ if a.Sym == nil && b.Sym == nil {
+ return false
+ }
+ if a.Sym == nil {
+ return true
+ }
+ if b.Sym == nil {
+		return false
+ }
+ k = stringsCompare(a.Sym.Name, b.Sym.Name)
+ if k != 0 {
+ return k < 0
+ }
+ if !exportname(a.Sym.Name) {
+ k = stringsCompare(a.Sym.Pkg.Path.S, b.Sym.Pkg.Path.S)
+ if k != 0 {
+ return k < 0
+ }
+ }
+
+ return false
+}
+
+func sortinter(t *Type) *Type {
+ var f *Type
+ var i int
+ var a []*Type
+
+ if t.Type == nil || t.Type.Down == nil {
+ return t
+ }
+
+ i = 0
+ for f = t.Type; f != nil; f = f.Down {
+ i++
+ }
+ a = make([]*Type, i)
+ i = 0
+ for f = t.Type; f != nil; f = f.Down {
+ a[i] = f
+ i++
+ }
+ sort.Sort(methcmp(a[:i]))
+	for i > 0 {
+		i--
+		a[i].Down = f
+		f = a[i]
+	}
+
+ t.Type = f
+ return t
+}
+
+func Nodintconst(v int64) *Node {
+ var c *Node
+
+ c = Nod(OLITERAL, nil, nil)
+ c.Addable = 1
+ c.Val.U.Xval = new(Mpint)
+ Mpmovecfix(c.Val.U.Xval, v)
+ c.Val.Ctype = CTINT
+ c.Type = Types[TIDEAL]
+ ullmancalc(c)
+ return c
+}
+
+func nodfltconst(v *Mpflt) *Node {
+ var c *Node
+
+ c = Nod(OLITERAL, nil, nil)
+ c.Addable = 1
+ c.Val.U.Fval = new(Mpflt)
+ mpmovefltflt(c.Val.U.Fval, v)
+ c.Val.Ctype = CTFLT
+ c.Type = Types[TIDEAL]
+ ullmancalc(c)
+ return c
+}
+
+func Nodconst(n *Node, t *Type, v int64) {
+ *n = Node{}
+ n.Op = OLITERAL
+ n.Addable = 1
+ ullmancalc(n)
+ n.Val.U.Xval = new(Mpint)
+ Mpmovecfix(n.Val.U.Xval, v)
+ n.Val.Ctype = CTINT
+ n.Type = t
+
+ if Isfloat[t.Etype] != 0 {
+ Fatal("nodconst: bad type %v", Tconv(t, 0))
+ }
+}
+
+func nodnil() *Node {
+ var c *Node
+
+ c = Nodintconst(0)
+ c.Val.Ctype = CTNIL
+ c.Type = Types[TNIL]
+ return c
+}
+
+func Nodbool(b int) *Node {
+ var c *Node
+
+ c = Nodintconst(0)
+ c.Val.Ctype = CTBOOL
+ c.Val.U.Bval = int16(b)
+ c.Type = idealbool
+ return c
+}
+
+func aindex(b *Node, t *Type) *Type {
+ var r *Type
+ var bound int64
+
+ bound = -1 // open bound
+ typecheck(&b, Erv)
+ if b != nil {
+ switch consttype(b) {
+ default:
+ Yyerror("array bound must be an integer expression")
+
+ case CTINT,
+ CTRUNE:
+ bound = Mpgetfix(b.Val.U.Xval)
+ if bound < 0 {
+				Yyerror("array bound must be non-negative")
+ }
+ }
+ }
+
+ // fixed array
+ r = typ(TARRAY)
+
+ r.Type = t
+ r.Bound = bound
+ return r
+}
+
+func treecopy(n *Node) *Node {
+ var m *Node
+
+ if n == nil {
+ return nil
+ }
+
+ switch n.Op {
+ default:
+ m = Nod(OXXX, nil, nil)
+ *m = *n
+ m.Orig = m
+ m.Left = treecopy(n.Left)
+ m.Right = treecopy(n.Right)
+ m.List = listtreecopy(n.List)
+ if m.Defn != nil {
+ panic("abort")
+ }
+
+ case ONONAME:
+ if n.Sym == Lookup("iota") {
+ // Not sure yet whether this is the real iota,
+			// but make a copy of the node just in case,
+ // so that all the copies of this const definition
+ // don't have the same iota value.
+ m = Nod(OXXX, nil, nil)
+
+ *m = *n
+ m.Iota = iota_
+ break
+ }
+ fallthrough
+ case ONAME,
+ OLITERAL,
+ OTYPE:
+ m = n
+ }
+
+ return m
+}
+
+func isnil(n *Node) int {
+ if n == nil {
+ return 0
+ }
+ if n.Op != OLITERAL {
+ return 0
+ }
+ if n.Val.Ctype != CTNIL {
+ return 0
+ }
+ return 1
+}
+
+func isptrto(t *Type, et int) int {
+ if t == nil {
+ return 0
+ }
+ if !(Isptr[t.Etype] != 0) {
+ return 0
+ }
+ t = t.Type
+ if t == nil {
+ return 0
+ }
+ if int(t.Etype) != et {
+ return 0
+ }
+ return 1
+}
+
+func Istype(t *Type, et int) int {
+ return bool2int(t != nil && int(t.Etype) == et)
+}
+
+func Isfixedarray(t *Type) int {
+ return bool2int(t != nil && t.Etype == TARRAY && t.Bound >= 0)
+}
+
+func Isslice(t *Type) int {
+ return bool2int(t != nil && t.Etype == TARRAY && t.Bound < 0)
+}
+
+func isblank(n *Node) bool {
+ if n == nil {
+ return false
+ }
+ return isblanksym(n.Sym)
+}
+
+func isblanksym(s *Sym) bool {
+ return s != nil && s.Name == "_"
+}
+
+func Isinter(t *Type) int {
+ return bool2int(t != nil && t.Etype == TINTER)
+}
+
+func isnilinter(t *Type) int {
+ if !(Isinter(t) != 0) {
+ return 0
+ }
+ if t.Type != nil {
+ return 0
+ }
+ return 1
+}
+
+func isideal(t *Type) int {
+ if t == nil {
+ return 0
+ }
+ if t == idealstring || t == idealbool {
+ return 1
+ }
+ switch t.Etype {
+ case TNIL,
+ TIDEAL:
+ return 1
+ }
+
+ return 0
+}
+
+/*
+ * given receiver of type t (t == r or t == *r)
+ * return type to hang methods off (r).
+ */
+func methtype(t *Type, mustname int) *Type {
+ if t == nil {
+ return nil
+ }
+
+ // strip away pointer if it's there
+ if Isptr[t.Etype] != 0 {
+ if t.Sym != nil {
+ return nil
+ }
+ t = t.Type
+ if t == nil {
+ return nil
+ }
+ }
+
+ // need a type name
+ if t.Sym == nil && (mustname != 0 || t.Etype != TSTRUCT) {
+ return nil
+ }
+
+ // check types
+ if !(issimple[t.Etype] != 0) {
+ switch t.Etype {
+ default:
+ return nil
+
+ case TSTRUCT,
+ TARRAY,
+ TMAP,
+ TCHAN,
+ TSTRING,
+ TFUNC:
+ break
+ }
+ }
+
+ return t
+}
+
+func cplxsubtype(et int) int {
+ switch et {
+ case TCOMPLEX64:
+ return TFLOAT32
+
+ case TCOMPLEX128:
+ return TFLOAT64
+ }
+
+ Fatal("cplxsubtype: %v\n", Econv(int(et), 0))
+ return 0
+}
+
+func eqnote(a, b *Strlit) bool {
+ return a == b || a != nil && b != nil && a.S == b.S
+}
+
+type TypePairList struct {
+ t1 *Type
+ t2 *Type
+ next *TypePairList
+}
+
+func onlist(l *TypePairList, t1 *Type, t2 *Type) int {
+ for ; l != nil; l = l.next {
+ if (l.t1 == t1 && l.t2 == t2) || (l.t1 == t2 && l.t2 == t1) {
+ return 1
+ }
+ }
+ return 0
+}
+
+// Return whether t1 and t2 are identical, following the spec rules.
+//
+// Any cyclic type must go through a named type, and if one is
+// named, it is only identical to the other if they are the same
+// pointer (t1 == t2), so there's no chance of chasing cycles
+// ad infinitum, so no need for a depth counter.
+func Eqtype(t1 *Type, t2 *Type) bool {
+ return eqtype1(t1, t2, nil) != 0
+}
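+
+// For example (a sketch): Eqtype(Types[TUINT8], bytetype) is true even
+// though the two *Type pointers differ, while two struct types are
+// identical only if corresponding fields agree in name, embedding,
+// type, and tag (eqnote).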
+
+func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) int {
+ var l TypePairList
+
+ if t1 == t2 {
+ return 1
+ }
+ if t1 == nil || t2 == nil || t1.Etype != t2.Etype {
+ return 0
+ }
+ if t1.Sym != nil || t2.Sym != nil {
+ // Special case: we keep byte and uint8 separate
+ // for error messages. Treat them as equal.
+ switch t1.Etype {
+ case TUINT8:
+ if (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype) {
+ return 1
+ }
+
+ case TINT,
+ TINT32:
+ if (t1 == Types[runetype.Etype] || t1 == runetype) && (t2 == Types[runetype.Etype] || t2 == runetype) {
+ return 1
+ }
+ }
+
+ return 0
+ }
+
+ if onlist(assumed_equal, t1, t2) != 0 {
+ return 1
+ }
+ l.next = assumed_equal
+ l.t1 = t1
+ l.t2 = t2
+
+ switch t1.Etype {
+ case TINTER,
+ TSTRUCT:
+ t1 = t1.Type
+ t2 = t2.Type
+		for ; t1 != nil && t2 != nil; t1, t2 = t1.Down, t2.Down {
+ if t1.Etype != TFIELD || t2.Etype != TFIELD {
+ Fatal("struct/interface missing field: %v %v", Tconv(t1, 0), Tconv(t2, 0))
+ }
+ if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !(eqtype1(t1.Type, t2.Type, &l) != 0) || !eqnote(t1.Note, t2.Note) {
+ goto no
+ }
+ }
+
+ if t1 == nil && t2 == nil {
+ goto yes
+ }
+ goto no
+
+ // Loop over structs: receiver, in, out.
+ case TFUNC:
+ t1 = t1.Type
+ t2 = t2.Type
+		for ; t1 != nil && t2 != nil; t1, t2 = t1.Down, t2.Down {
+ var ta *Type
+ var tb *Type
+
+ if t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
+ Fatal("func missing struct: %v %v", Tconv(t1, 0), Tconv(t2, 0))
+ }
+
+ // Loop over fields in structs, ignoring argument names.
+ ta = t1.Type
+ tb = t2.Type
+			for ; ta != nil && tb != nil; ta, tb = ta.Down, tb.Down {
+ if ta.Etype != TFIELD || tb.Etype != TFIELD {
+ Fatal("func struct missing field: %v %v", Tconv(ta, 0), Tconv(tb, 0))
+ }
+ if ta.Isddd != tb.Isddd || !(eqtype1(ta.Type, tb.Type, &l) != 0) {
+ goto no
+ }
+ }
+
+ if ta != nil || tb != nil {
+ goto no
+ }
+ }
+
+ if t1 == nil && t2 == nil {
+ goto yes
+ }
+ goto no
+
+ case TARRAY:
+ if t1.Bound != t2.Bound {
+ goto no
+ }
+
+ case TCHAN:
+ if t1.Chan != t2.Chan {
+ goto no
+ }
+ }
+
+ if eqtype1(t1.Down, t2.Down, &l) != 0 && eqtype1(t1.Type, t2.Type, &l) != 0 {
+ goto yes
+ }
+ goto no
+
+yes:
+ return 1
+
+no:
+ return 0
+}
+
+// Are t1 and t2 equal struct types when field names are ignored?
+// For deciding whether the result struct from g can be copied
+// directly when compiling f(g()).
+func eqtypenoname(t1 *Type, t2 *Type) int {
+ if t1 == nil || t2 == nil || t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
+ return 0
+ }
+
+ t1 = t1.Type
+ t2 = t2.Type
+ for {
+ if !Eqtype(t1, t2) {
+ return 0
+ }
+ if t1 == nil {
+ return 1
+ }
+ t1 = t1.Down
+ t2 = t2.Down
+ }
+}
+
+// Is type src assignment compatible to type dst?
+// If so, return op code to use in conversion.
+// If not, return 0.
+func assignop(src *Type, dst *Type, why *string) int {
+ var missing *Type
+ var have *Type
+ var ptr int
+
+ if why != nil {
+ *why = ""
+ }
+
+ // TODO(rsc,lvd): This behaves poorly in the presence of inlining.
+ // https://golang.org/issue/2795
+ if safemode != 0 && importpkg == nil && src != nil && src.Etype == TUNSAFEPTR {
+ Yyerror("cannot use unsafe.Pointer")
+ errorexit()
+ }
+
+ if src == dst {
+ return OCONVNOP
+ }
+ if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil {
+ return 0
+ }
+
+ // 1. src type is identical to dst.
+ if Eqtype(src, dst) {
+ return OCONVNOP
+ }
+
+ // 2. src and dst have identical underlying types
+ // and either src or dst is not a named type or
+ // both are empty interface types.
+ // For assignable but different non-empty interface types,
+ // we want to recompute the itab.
+ if Eqtype(src.Orig, dst.Orig) && (src.Sym == nil || dst.Sym == nil || isnilinter(src) != 0) {
+ return OCONVNOP
+ }
+
+ // 3. dst is an interface type and src implements dst.
+ if dst.Etype == TINTER && src.Etype != TNIL {
+ if implements(src, dst, &missing, &have, &ptr) != 0 {
+ return OCONVIFACE
+ }
+
+ // we'll have complained about this method anyway, suppress spurious messages.
+ if have != nil && have.Sym == missing.Sym && (have.Type.Broke != 0 || missing.Type.Broke != 0) {
+ return OCONVIFACE
+ }
+
+ if why != nil {
+ if isptrto(src, TINTER) != 0 {
+ *why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", Tconv(src, 0))
+ } else if have != nil && have.Sym == missing.Sym && have.Nointerface != 0 {
+ *why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0))
+ } else if have != nil && have.Sym == missing.Sym {
+ *why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+ } else if ptr != 0 {
+ *why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0))
+ } else if have != nil {
+ *why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+ } else {
+ *why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0))
+ }
+ }
+
+ return 0
+ }
+
+ if isptrto(dst, TINTER) != 0 {
+ if why != nil {
+ *why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", Tconv(dst, 0))
+ }
+ return 0
+ }
+
+ if src.Etype == TINTER && dst.Etype != TBLANK {
+ if why != nil && implements(dst, src, &missing, &have, &ptr) != 0 {
+ *why = ": need type assertion"
+ }
+ return 0
+ }
+
+ // 4. src is a bidirectional channel value, dst is a channel type,
+ // src and dst have identical element types, and
+ // either src or dst is not a named type.
+ if src.Etype == TCHAN && src.Chan == Cboth && dst.Etype == TCHAN {
+ if Eqtype(src.Type, dst.Type) && (src.Sym == nil || dst.Sym == nil) {
+ return OCONVNOP
+ }
+ }
+
+ // 5. src is the predeclared identifier nil and dst is a nillable type.
+ if src.Etype == TNIL {
+ switch dst.Etype {
+ case TARRAY:
+ if dst.Bound != -100 { // not slice
+ break
+ }
+ fallthrough
+
+ case TPTR32,
+ TPTR64,
+ TFUNC,
+ TMAP,
+ TCHAN,
+ TINTER:
+ return OCONVNOP
+ }
+ }
+
+ // 6. rule about untyped constants - already converted by defaultlit.
+
+ // 7. Any typed value can be assigned to the blank identifier.
+ if dst.Etype == TBLANK {
+ return OCONVNOP
+ }
+
+ return 0
+}
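+
+// For example (a sketch): with type B []byte, assignop(B, []byte, nil)
+// is OCONVNOP by rule 2 (identical underlying types, one side unnamed),
+// and assigning a concrete type to an interface it implements yields
+// OCONVIFACE by rule 3.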
+
+// Can we convert a value of type src to a value of type dst?
+// If so, return op code to use in conversion (maybe OCONVNOP).
+// If not, return 0.
+func convertop(src *Type, dst *Type, why *string) int {
+ var op int
+
+ if why != nil {
+ *why = ""
+ }
+
+ if src == dst {
+ return OCONVNOP
+ }
+ if src == nil || dst == nil {
+ return 0
+ }
+
+ // 1. src can be assigned to dst.
+ op = assignop(src, dst, why)
+ if op != 0 {
+ return op
+ }
+
+ // The rules for interfaces are no different in conversions
+ // than assignments. If interfaces are involved, stop now
+ // with the good message from assignop.
+ // Otherwise clear the error.
+ if src.Etype == TINTER || dst.Etype == TINTER {
+ return 0
+ }
+ if why != nil {
+ *why = ""
+ }
+
+ // 2. src and dst have identical underlying types.
+ if Eqtype(src.Orig, dst.Orig) {
+ return OCONVNOP
+ }
+
+ // 3. src and dst are unnamed pointer types
+ // and their base types have identical underlying types.
+ if Isptr[src.Etype] != 0 && Isptr[dst.Etype] != 0 && src.Sym == nil && dst.Sym == nil {
+ if Eqtype(src.Type.Orig, dst.Type.Orig) {
+ return OCONVNOP
+ }
+ }
+
+ // 4. src and dst are both integer or floating point types.
+ if (Isint[src.Etype] != 0 || Isfloat[src.Etype] != 0) && (Isint[dst.Etype] != 0 || Isfloat[dst.Etype] != 0) {
+ if Simtype[src.Etype] == Simtype[dst.Etype] {
+ return OCONVNOP
+ }
+ return OCONV
+ }
+
+ // 5. src and dst are both complex types.
+ if Iscomplex[src.Etype] != 0 && Iscomplex[dst.Etype] != 0 {
+ if Simtype[src.Etype] == Simtype[dst.Etype] {
+ return OCONVNOP
+ }
+ return OCONV
+ }
+
+ // 6. src is an integer or has type []byte or []rune
+ // and dst is a string type.
+ if Isint[src.Etype] != 0 && dst.Etype == TSTRING {
+ return ORUNESTR
+ }
+
+ if Isslice(src) != 0 && dst.Etype == TSTRING {
+ if src.Type.Etype == bytetype.Etype {
+ return OARRAYBYTESTR
+ }
+ if src.Type.Etype == runetype.Etype {
+ return OARRAYRUNESTR
+ }
+ }
+
+ // 7. src is a string and dst is []byte or []rune.
+ // String to slice.
+ if src.Etype == TSTRING && Isslice(dst) != 0 {
+ if dst.Type.Etype == bytetype.Etype {
+ return OSTRARRAYBYTE
+ }
+ if dst.Type.Etype == runetype.Etype {
+ return OSTRARRAYRUNE
+ }
+ }
+
+ // 8. src is a pointer or uintptr and dst is unsafe.Pointer.
+ if (Isptr[src.Etype] != 0 || src.Etype == TUINTPTR) && dst.Etype == TUNSAFEPTR {
+ return OCONVNOP
+ }
+
+ // 9. src is unsafe.Pointer and dst is a pointer or uintptr.
+ if src.Etype == TUNSAFEPTR && (Isptr[dst.Etype] != 0 || dst.Etype == TUINTPTR) {
+ return OCONVNOP
+ }
+
+ return 0
+}
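+
+// For example (a sketch): convertop for []byte -> string returns
+// OARRAYBYTESTR (rule 6), string -> []rune returns OSTRARRAYRUNE
+// (rule 7), and uintptr -> unsafe.Pointer returns OCONVNOP (rule 8).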
+
+// Convert node n for assignment to type t.
+func assignconv(n *Node, t *Type, context string) *Node {
+ var op int
+ var r *Node
+ var old *Node
+ var why string
+
+ if n == nil || n.Type == nil || n.Type.Broke != 0 {
+ return n
+ }
+
+ if t.Etype == TBLANK && n.Type.Etype == TNIL {
+ Yyerror("use of untyped nil")
+ }
+
+ old = n
+ old.Diag++ // silence errors about n; we'll issue one below
+ defaultlit(&n, t)
+ old.Diag--
+ if t.Etype == TBLANK {
+ return n
+ }
+
+ // Convert ideal bool from comparison to plain bool
+ // if the next step is non-bool (like interface{}).
+ if n.Type == idealbool && t.Etype != TBOOL {
+ if n.Op == ONAME || n.Op == OLITERAL {
+ r = Nod(OCONVNOP, n, nil)
+ r.Type = Types[TBOOL]
+ r.Typecheck = 1
+ r.Implicit = 1
+ n = r
+ }
+ }
+
+ if Eqtype(n.Type, t) {
+ return n
+ }
+
+ op = assignop(n.Type, t, &why)
+ if op == 0 {
+ Yyerror("cannot use %v as type %v in %s%s", Nconv(n, obj.FmtLong), Tconv(t, 0), context, why)
+ op = OCONV
+ }
+
+ r = Nod(op, n, nil)
+ r.Type = t
+ r.Typecheck = 1
+ r.Implicit = 1
+ r.Orig = n.Orig
+ return r
+}
+
+func subtype(stp **Type, t *Type, d int) int {
+ var st *Type
+
+loop:
+ st = *stp
+ if st == nil {
+ return 0
+ }
+
+ d++
+ if d >= 10 {
+ return 0
+ }
+
+ switch st.Etype {
+ default:
+ return 0
+
+ case TPTR32,
+ TPTR64,
+ TCHAN,
+ TARRAY:
+ stp = &st.Type
+ goto loop
+
+ case TANY:
+ if !(st.Copyany != 0) {
+ return 0
+ }
+ *stp = t
+
+ case TMAP:
+ if subtype(&st.Down, t, d) != 0 {
+ break
+ }
+ stp = &st.Type
+ goto loop
+
+ case TFUNC:
+ for {
+ if subtype(&st.Type, t, d) != 0 {
+ break
+ }
+ if subtype(&st.Type.Down.Down, t, d) != 0 {
+ break
+ }
+ if subtype(&st.Type.Down, t, d) != 0 {
+ break
+ }
+ return 0
+ }
+
+ case TSTRUCT:
+ for st = st.Type; st != nil; st = st.Down {
+ if subtype(&st.Type, t, d) != 0 {
+ return 1
+ }
+ }
+ return 0
+ }
+
+ return 1
+}
+
+/*
+ * Is this a 64-bit type?
+ */
+func Is64(t *Type) int {
+ if t == nil {
+ return 0
+ }
+ switch Simtype[t.Etype] {
+ case TINT64,
+ TUINT64,
+ TPTR64:
+ return 1
+ }
+
+ return 0
+}
+
+/*
+ * Is a conversion between t1 and t2 a no-op?
+ */
+func Noconv(t1 *Type, t2 *Type) int {
+ var e1 int
+ var e2 int
+
+ e1 = int(Simtype[t1.Etype])
+ e2 = int(Simtype[t2.Etype])
+
+ switch e1 {
+ case TINT8,
+ TUINT8:
+ return bool2int(e2 == TINT8 || e2 == TUINT8)
+
+ case TINT16,
+ TUINT16:
+ return bool2int(e2 == TINT16 || e2 == TUINT16)
+
+ case TINT32,
+ TUINT32,
+ TPTR32:
+ return bool2int(e2 == TINT32 || e2 == TUINT32 || e2 == TPTR32)
+
+ case TINT64,
+ TUINT64,
+ TPTR64:
+ return bool2int(e2 == TINT64 || e2 == TUINT64 || e2 == TPTR64)
+
+ case TFLOAT32:
+ return bool2int(e2 == TFLOAT32)
+
+ case TFLOAT64:
+ return bool2int(e2 == TFLOAT64)
+ }
+
+ return 0
+}
+
+func argtype(on *Node, t *Type) {
+ dowidth(t)
+ if !(subtype(&on.Type, t, 0) != 0) {
+ Fatal("argtype: failed %v %v\n", Nconv(on, 0), Tconv(t, 0))
+ }
+}
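+
+// For example (a sketch): runtime helpers are declared once with TANY
+// placeholders; syslook(name, 1) deep-copies such a signature and
+// argtype substitutes the concrete types, so a single declaration
+// serves every call site.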
+
+func shallow(t *Type) *Type {
+ var nt *Type
+
+ if t == nil {
+ return nil
+ }
+ nt = typ(0)
+ *nt = *t
+ if t.Orig == t {
+ nt.Orig = nt
+ }
+ return nt
+}
+
+func deep(t *Type) *Type {
+ var nt *Type
+ var xt *Type
+
+ if t == nil {
+ return nil
+ }
+
+ switch t.Etype {
+ default:
+ nt = t // share from here down
+
+ case TANY:
+ nt = shallow(t)
+ nt.Copyany = 1
+
+ case TPTR32,
+ TPTR64,
+ TCHAN,
+ TARRAY:
+ nt = shallow(t)
+ nt.Type = deep(t.Type)
+
+ case TMAP:
+ nt = shallow(t)
+ nt.Down = deep(t.Down)
+ nt.Type = deep(t.Type)
+
+ case TFUNC:
+ nt = shallow(t)
+ nt.Type = deep(t.Type)
+ nt.Type.Down = deep(t.Type.Down)
+ nt.Type.Down.Down = deep(t.Type.Down.Down)
+
+ case TSTRUCT:
+ nt = shallow(t)
+ nt.Type = shallow(t.Type)
+ xt = nt.Type
+
+ for t = t.Type; t != nil; t = t.Down {
+ xt.Type = deep(t.Type)
+ xt.Down = shallow(t.Down)
+ xt = xt.Down
+ }
+ }
+
+ return nt
+}
+
+func syslook(name string, copy int) *Node {
+ var s *Sym
+ var n *Node
+
+ s = Pkglookup(name, Runtimepkg)
+ if s == nil || s.Def == nil {
+ Fatal("syslook: can't find runtime.%s", name)
+ }
+
+ if !(copy != 0) {
+ return s.Def
+ }
+
+ n = Nod(0, nil, nil)
+ *n = *s.Def
+ n.Type = deep(s.Def.Type)
+
+ return n
+}
+
+/*
+ * compute a hash value for type t.
+ * if t is a method type, ignore the receiver
+ * so that the hash can be used in interface checks.
+ * the type formatter already contains
+ * all the necessary logic to generate a representation
+ * of the type that completely describes it.
+ * formatting the type here avoids duplicating that code.
+ * using md5 here is overkill, but I got tired of
+ * accidental collisions making the runtime think
+ * two types are equal when they really aren't.
+ */
+func typehash(t *Type) uint32 {
+ var p string
+ var d MD5
+
+ if t.Thistuple != 0 {
+ // hide method receiver from Tpretty
+ t.Thistuple = 0
+
+ p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned))
+ t.Thistuple = 1
+ } else {
+ p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned))
+ }
+
+ md5reset(&d)
+
+ md5write(&d, []byte(p), len(p))
+
+ return uint32(md5sum(&d, nil))
+}
+
+func Ptrto(t *Type) *Type {
+ var t1 *Type
+
+ if Tptr == 0 {
+ Fatal("ptrto: no tptr")
+ }
+ t1 = typ(Tptr)
+ t1.Type = t
+ t1.Width = int64(Widthptr)
+ t1.Align = uint8(Widthptr)
+ return t1
+}
+
+func frame(context int) {
+ var l *NodeList
+ var n *Node
+ var w int64
+
+ if context != 0 {
+ fmt.Printf("--- external frame ---\n")
+ l = externdcl
+ } else if Curfn != nil {
+ fmt.Printf("--- %v frame ---\n", Sconv(Curfn.Nname.Sym, 0))
+ l = Curfn.Dcl
+ } else {
+ return
+ }
+
+ for ; l != nil; l = l.Next {
+ n = l.N
+ w = -1
+ if n.Type != nil {
+ w = n.Type.Width
+ }
+ switch n.Op {
+ case ONAME:
+ fmt.Printf("%v %v G%d %v width=%d\n", Oconv(int(n.Op), 0), Sconv(n.Sym, 0), n.Vargen, Tconv(n.Type, 0), w)
+
+ case OTYPE:
+ fmt.Printf("%v %v width=%d\n", Oconv(int(n.Op), 0), Tconv(n.Type, 0), w)
+ }
+ }
+}
+
+/*
+ * calculate sethi/ullman number
+ * roughly how many registers needed to
+ * compile a node. used to compile the
+ * hardest side first to minimize registers.
+ */
+func ullmancalc(n *Node) {
+ var ul int
+ var ur int
+
+ if n == nil {
+ return
+ }
+
+ if n.Ninit != nil {
+ ul = UINF
+ goto out
+ }
+
+ switch n.Op {
+ case OREGISTER,
+ OLITERAL,
+ ONAME:
+ ul = 1
+ if n.Class == PPARAMREF || (n.Class&PHEAP != 0) {
+ ul++
+ }
+ goto out
+
+ case OCALL,
+ OCALLFUNC,
+ OCALLMETH,
+ OCALLINTER:
+ ul = UINF
+ goto out
+
+ // hard with race detector
+ case OANDAND,
+ OOROR:
+ if flag_race != 0 {
+ ul = UINF
+ goto out
+ }
+ }
+
+ ul = 1
+ if n.Left != nil {
+ ul = int(n.Left.Ullman)
+ }
+ ur = 1
+ if n.Right != nil {
+ ur = int(n.Right.Ullman)
+ }
+ if ul == ur {
+ ul += 1
+ }
+ if ur > ul {
+ ul = ur
+ }
+
+out:
+ if ul > 200 {
+		ul = 200 // clamp to uint8 with room to grow
+ }
+ n.Ullman = uint8(ul)
+}
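+
+// For example (a sketch): in x + f(y) the call is assigned UINF, so
+// the code generator evaluates it first and x need not stay live in a
+// register across the call.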
+
+func badtype(o int, tl *Type, tr *Type) {
+ var fmt_ string
+ var s string
+
+ fmt_ = ""
+ if tl != nil {
+ fmt_ += fmt.Sprintf("\n\t%v", Tconv(tl, 0))
+ }
+ if tr != nil {
+ fmt_ += fmt.Sprintf("\n\t%v", Tconv(tr, 0))
+ }
+
+ // common mistake: *struct and *interface.
+ if tl != nil && tr != nil && Isptr[tl.Etype] != 0 && Isptr[tr.Etype] != 0 {
+ if tl.Type.Etype == TSTRUCT && tr.Type.Etype == TINTER {
+ fmt_ += fmt.Sprintf("\n\t(*struct vs *interface)")
+ } else if tl.Type.Etype == TINTER && tr.Type.Etype == TSTRUCT {
+ fmt_ += fmt.Sprintf("\n\t(*interface vs *struct)")
+ }
+ }
+
+ s = fmt_
+ Yyerror("illegal types for operand: %v%s", Oconv(int(o), 0), s)
+}
+
+/*
+ * iterator to walk a structure declaration
+ */
+func Structfirst(s *Iter, nn **Type) *Type {
+ var n *Type
+ var t *Type
+
+ n = *nn
+ if n == nil {
+ goto bad
+ }
+
+ switch n.Etype {
+ default:
+ goto bad
+
+ case TSTRUCT,
+ TINTER,
+ TFUNC:
+ break
+ }
+
+ t = n.Type
+ if t == nil {
+ goto rnil
+ }
+
+ if t.Etype != TFIELD {
+ Fatal("structfirst: not field %v", Tconv(t, 0))
+ }
+
+ s.T = t
+ return t
+
+bad:
+ Fatal("structfirst: not struct %v", Tconv(n, 0))
+
+rnil:
+ return nil
+}
+
+func structnext(s *Iter) *Type {
+ var n *Type
+ var t *Type
+
+ n = s.T
+ t = n.Down
+ if t == nil {
+ goto rnil
+ }
+
+ if t.Etype != TFIELD {
+ goto bad
+ }
+
+ s.T = t
+ return t
+
+bad:
+ Fatal("structnext: not struct %v", Tconv(n, 0))
+
+rnil:
+ return nil
+}
+
+/*
+ * iterator to this and inargs in a function
+ */
+func funcfirst(s *Iter, t *Type) *Type {
+ var fp *Type
+
+ if t == nil {
+ goto bad
+ }
+
+ if t.Etype != TFUNC {
+ goto bad
+ }
+
+ s.Tfunc = t
+ s.Done = 0
+ fp = Structfirst(s, getthis(t))
+ if fp == nil {
+ s.Done = 1
+ fp = Structfirst(s, getinarg(t))
+ }
+
+ return fp
+
+bad:
+ Fatal("funcfirst: not func %v", Tconv(t, 0))
+ return nil
+}
+
+func funcnext(s *Iter) *Type {
+ var fp *Type
+
+ fp = structnext(s)
+ if fp == nil && !(s.Done != 0) {
+ s.Done = 1
+ fp = Structfirst(s, getinarg(s.Tfunc))
+ }
+
+ return fp
+}
+
+func getthis(t *Type) **Type {
+ if t.Etype != TFUNC {
+ Fatal("getthis: not a func %v", Tconv(t, 0))
+ }
+ return &t.Type
+}
+
+func Getoutarg(t *Type) **Type {
+ if t.Etype != TFUNC {
+ Fatal("getoutarg: not a func %v", Tconv(t, 0))
+ }
+ return &t.Type.Down
+}
+
+func getinarg(t *Type) **Type {
+ if t.Etype != TFUNC {
+ Fatal("getinarg: not a func %v", Tconv(t, 0))
+ }
+ return &t.Type.Down.Down
+}
+
+func getthisx(t *Type) *Type {
+ return *getthis(t)
+}
+
+func getoutargx(t *Type) *Type {
+ return *Getoutarg(t)
+}
+
+func getinargx(t *Type) *Type {
+ return *getinarg(t)
+}
+
+/*
+ * return !(op)
+ * eg == <=> !=
+ */
+func Brcom(a int) int {
+ switch a {
+ case OEQ:
+ return ONE
+ case ONE:
+ return OEQ
+ case OLT:
+ return OGE
+ case OGT:
+ return OLE
+ case OLE:
+ return OGT
+ case OGE:
+ return OLT
+ }
+
+ Fatal("brcom: no com for %v\n", Oconv(int(a), 0))
+ return a
+}
+
+/*
+ * return reverse(op)
+ * eg a op b <=> b r(op) a
+ */
+func Brrev(a int) int {
+ switch a {
+ case OEQ:
+ return OEQ
+ case ONE:
+ return ONE
+ case OLT:
+ return OGT
+ case OGT:
+ return OLT
+ case OLE:
+ return OGE
+ case OGE:
+ return OLE
+ }
+
+ Fatal("brcom: no rev for %v\n", Oconv(int(a), 0))
+ return a
+}
+
+/*
+ * return side effect-free n, appending side effects to init.
+ * result is assignable if n is.
+ */
+func safeexpr(n *Node, init **NodeList) *Node {
+ var l *Node
+ var r *Node
+ var a *Node
+
+ if n == nil {
+ return nil
+ }
+
+ if n.Ninit != nil {
+ walkstmtlist(n.Ninit)
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+ }
+
+ switch n.Op {
+ case ONAME,
+ OLITERAL:
+ return n
+
+ case ODOT:
+ l = safeexpr(n.Left, init)
+ if l == n.Left {
+ return n
+ }
+ r = Nod(OXXX, nil, nil)
+ *r = *n
+ r.Left = l
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ return r
+
+ case ODOTPTR,
+ OIND:
+ l = safeexpr(n.Left, init)
+ if l == n.Left {
+ return n
+ }
+ a = Nod(OXXX, nil, nil)
+ *a = *n
+ a.Left = l
+ walkexpr(&a, init)
+ return a
+
+ case OINDEX,
+ OINDEXMAP:
+ l = safeexpr(n.Left, init)
+ r = safeexpr(n.Right, init)
+ if l == n.Left && r == n.Right {
+ return n
+ }
+ a = Nod(OXXX, nil, nil)
+ *a = *n
+ a.Left = l
+ a.Right = r
+ walkexpr(&a, init)
+ return a
+ }
+
+ // make a copy; must not be used as an lvalue
+ if islvalue(n) != 0 {
+ Fatal("missing lvalue case in safeexpr: %v", Nconv(n, 0))
+ }
+ return cheapexpr(n, init)
+}
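+
+// For example (a sketch): safeexpr on a[f()] leaves the name a alone
+// but copies the result of f() into a temporary via cheapexpr, so a
+// caller may emit the indexed expression twice without re-running f().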
+
+func copyexpr(n *Node, t *Type, init **NodeList) *Node {
+ var a *Node
+ var l *Node
+
+ l = temp(t)
+ a = Nod(OAS, l, n)
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ *init = list(*init, a)
+ return l
+}
+
+/*
+ * return side-effect free and cheap n, appending side effects to init.
+ * result may not be assignable.
+ */
+func cheapexpr(n *Node, init **NodeList) *Node {
+ switch n.Op {
+ case ONAME,
+ OLITERAL:
+ return n
+ }
+
+ return copyexpr(n, n.Type, init)
+}
+
+/*
+ * return n in a local variable of type t if it is not already.
+ * the value is guaranteed not to change except by direct
+ * assignment to it.
+ */
+func localexpr(n *Node, t *Type, init **NodeList) *Node {
+ if n.Op == ONAME && (!(n.Addrtaken != 0) || strings.HasPrefix(n.Sym.Name, "autotmp_")) && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && convertop(n.Type, t, nil) == OCONVNOP {
+ return n
+ }
+
+ return copyexpr(n, t, init)
+}
+
+func Setmaxarg(t *Type, extra int32) {
+ var w int64
+
+ dowidth(t)
+ w = t.Argwid
+ if w >= Thearch.MAXWIDTH {
+ Fatal("bad argwid %v", Tconv(t, 0))
+ }
+ w += int64(extra)
+ if w >= Thearch.MAXWIDTH {
+ Fatal("bad argwid %d + %v", extra, Tconv(t, 0))
+ }
+ if w > Maxarg {
+ Maxarg = w
+ }
+}
+
+/*
+ * code to resolve elided DOTs
+ * in embedded types
+ */
+
+// search depth 0 --
+// return count of fields+methods
+// found with a given name
+func lookdot0(s *Sym, t *Type, save **Type, ignorecase int) int {
+ var f *Type
+ var u *Type
+ var c int
+
+ u = t
+ if Isptr[u.Etype] != 0 {
+ u = u.Type
+ }
+
+ c = 0
+ if u.Etype == TSTRUCT || u.Etype == TINTER {
+ for f = u.Type; f != nil; f = f.Down {
+ if f.Sym == s || (ignorecase != 0 && f.Type.Etype == TFUNC && f.Type.Thistuple > 0 && strings.EqualFold(f.Sym.Name, s.Name)) {
+ if save != nil {
+ *save = f
+ }
+ c++
+ }
+ }
+ }
+
+ u = methtype(t, 0)
+ if u != nil {
+ for f = u.Method; f != nil; f = f.Down {
+ if f.Embedded == 0 && (f.Sym == s || (ignorecase != 0 && strings.EqualFold(f.Sym.Name, s.Name))) {
+ if save != nil {
+ *save = f
+ }
+ c++
+ }
+ }
+ }
+
+ return c
+}
+
+// search depth d for field/method s --
+// returns the count of fields+methods
+// found at that depth.
+// the path of embedded fields taken is
+// recorded in the dotlist array.
+func adddot1(s *Sym, t *Type, d int, save **Type, ignorecase int) int {
+ var f *Type
+ var u *Type
+ var c int
+ var a int
+
+ if t.Trecur != 0 {
+ return 0
+ }
+ t.Trecur = 1
+
+ if d == 0 {
+ c = lookdot0(s, t, save, ignorecase)
+ goto out
+ }
+
+ c = 0
+ u = t
+ if Isptr[u.Etype] != 0 {
+ u = u.Type
+ }
+ if u.Etype != TSTRUCT && u.Etype != TINTER {
+ goto out
+ }
+
+ d--
+ for f = u.Type; f != nil; f = f.Down {
+ if !(f.Embedded != 0) {
+ continue
+ }
+ if f.Sym == nil {
+ continue
+ }
+ a = adddot1(s, f.Type, d, save, ignorecase)
+ if a != 0 && c == 0 {
+ dotlist[d].field = f
+ }
+ c += a
+ }
+
+out:
+ t.Trecur = 0
+ return c
+}
+
+// in T.field,
+// find the elided embedded fields that
+// give the shortest unique addressing and
+// modify the tree to spell out those fields.
+func adddot(n *Node) *Node {
+ var t *Type
+ var s *Sym
+ var c int
+ var d int
+
+ typecheck(&n.Left, Etype|Erv)
+ n.Diag |= n.Left.Diag
+ t = n.Left.Type
+ if t == nil {
+ goto ret
+ }
+
+ if n.Left.Op == OTYPE {
+ goto ret
+ }
+
+ if n.Right.Op != ONAME {
+ goto ret
+ }
+ s = n.Right.Sym
+ if s == nil {
+ goto ret
+ }
+
+ for d = 0; d < len(dotlist); d++ {
+ c = adddot1(s, t, d, nil, 0)
+ if c > 0 {
+ goto out
+ }
+ }
+
+ goto ret
+
+out:
+ if c > 1 {
+ Yyerror("ambiguous selector %v", Nconv(n, 0))
+ n.Left = nil
+ return n
+ }
+
+ // rebuild elided dots
+ for c = d - 1; c >= 0; c-- {
+ n.Left = Nod(ODOT, n.Left, newname(dotlist[c].field.Sym))
+ }
+
+ret:
+ return n
+}
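+
+// Illustration (hypothetical types): given
+//
+//	type Inner struct{ X int }
+//	type Outer struct{ Inner }
+//
+// a selector o.X on a value o of type Outer is rewritten by adddot
+// into o.Inner.X, because the depth-1 search finds exactly one X.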
+
+/*
+ * code to help generate trampoline
+ * functions for methods on embedded
+ * subtypes.
+ * these are approx the same as
+ * the corresponding adddot routines
+ * except that they expect to be called
+ * with unique tasks and they return
+ * the actual methods.
+ */
+type Symlink struct {
+ field *Type
+ good uint8
+ followptr uint8
+ link *Symlink
+}
+
+var slist *Symlink
+
+func expand0(t *Type, followptr int) {
+ var f *Type
+ var u *Type
+ var sl *Symlink
+
+ u = t
+ if Isptr[u.Etype] != 0 {
+ followptr = 1
+ u = u.Type
+ }
+
+ if u.Etype == TINTER {
+ for f = u.Type; f != nil; f = f.Down {
+ if f.Sym.Flags&SymUniq != 0 {
+ continue
+ }
+ f.Sym.Flags |= SymUniq
+ sl = new(Symlink)
+ sl.field = f
+ sl.link = slist
+ sl.followptr = uint8(followptr)
+ slist = sl
+ }
+
+ return
+ }
+
+ u = methtype(t, 0)
+ if u != nil {
+ for f = u.Method; f != nil; f = f.Down {
+ if f.Sym.Flags&SymUniq != 0 {
+ continue
+ }
+ f.Sym.Flags |= SymUniq
+ sl = new(Symlink)
+ sl.field = f
+ sl.link = slist
+ sl.followptr = uint8(followptr)
+ slist = sl
+ }
+ }
+}
+
+func expand1(t *Type, d int, followptr int) {
+ var f *Type
+ var u *Type
+
+ if t.Trecur != 0 {
+ return
+ }
+ if d == 0 {
+ return
+ }
+ t.Trecur = 1
+
+ if d != len(dotlist)-1 {
+ expand0(t, followptr)
+ }
+
+ u = t
+ if Isptr[u.Etype] != 0 {
+ followptr = 1
+ u = u.Type
+ }
+
+ if u.Etype != TSTRUCT && u.Etype != TINTER {
+ goto out
+ }
+
+ for f = u.Type; f != nil; f = f.Down {
+ if !(f.Embedded != 0) {
+ continue
+ }
+ if f.Sym == nil {
+ continue
+ }
+ expand1(f.Type, d-1, followptr)
+ }
+
+out:
+ t.Trecur = 0
+}
+
+func expandmeth(t *Type) {
+ var sl *Symlink
+ var f *Type
+ var c int
+ var d int
+
+ if t == nil || t.Xmethod != nil {
+ return
+ }
+
+ // mark top-level method symbols
+ // so that expand1 doesn't consider them.
+ for f = t.Method; f != nil; f = f.Down {
+ f.Sym.Flags |= SymUniq
+ }
+
+ // generate all reachable methods
+ slist = nil
+
+ expand1(t, len(dotlist)-1, 0)
+
+ // check each method to be uniquely reachable
+ for sl = slist; sl != nil; sl = sl.link {
+ sl.field.Sym.Flags &^= SymUniq
+ for d = 0; d < len(dotlist); d++ {
+ c = adddot1(sl.field.Sym, t, d, &f, 0)
+ if c == 0 {
+ continue
+ }
+ if c == 1 {
+				// adddot1 may have dug out arbitrary fields; we only want methods.
+ if f.Type.Etype == TFUNC && f.Type.Thistuple > 0 {
+ sl.good = 1
+ sl.field = f
+ }
+ }
+
+ break
+ }
+ }
+
+ for f = t.Method; f != nil; f = f.Down {
+ f.Sym.Flags &^= SymUniq
+ }
+
+ t.Xmethod = t.Method
+ for sl = slist; sl != nil; sl = sl.link {
+ if sl.good != 0 {
+ // add it to the base type method list
+ f = typ(TFIELD)
+
+ *f = *sl.field
+ f.Embedded = 1 // needs a trampoline
+ if sl.followptr != 0 {
+ f.Embedded = 2
+ }
+ f.Down = t.Xmethod
+ t.Xmethod = f
+ }
+ }
+}
+
+/*
+ * Given funarg struct list, return list of ODCLFIELD Node fn args.
+ */
+func structargs(tl **Type, mustname int) *NodeList {
+ var savet Iter
+ var a *Node
+ var n *Node
+ var args *NodeList
+ var t *Type
+ var buf string
+ var gen int
+
+ args = nil
+ gen = 0
+ for t = Structfirst(&savet, tl); t != nil; t = structnext(&savet) {
+ n = nil
+ if mustname != 0 && (t.Sym == nil || t.Sym.Name == "_") {
+ // invent a name so that we can refer to it in the trampoline
+ buf = fmt.Sprintf(".anon%d", gen)
+ gen++
+
+ n = newname(Lookup(buf))
+ } else if t.Sym != nil {
+ n = newname(t.Sym)
+ }
+ a = Nod(ODCLFIELD, n, typenod(t.Type))
+ a.Isddd = t.Isddd
+ if n != nil {
+ n.Isddd = t.Isddd
+ }
+ args = list(args, a)
+ }
+
+ return args
+}
+
+/*
+ * Generate a wrapper function to convert from
+ * a receiver of type T to a receiver of type U.
+ * That is,
+ *
+ * func (t T) M() {
+ * ...
+ * }
+ *
+ * already exists; this function generates
+ *
+ * func (u U) M() {
+ * u.M()
+ * }
+ *
+ * where the types T and U are such that u.M() is valid
+ * and calls the T.M method.
+ * The resulting function is for use in method tables.
+ *
+ * rcvr - U
+ * method - M func (t T)(), a TFIELD type struct
+ * newnam - the eventual mangled name of this function
+ */
+
+var genwrapper_linehistdone int = 0
+
+func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
+ var this *Node
+ var fn *Node
+ var call *Node
+ var n *Node
+ var t *Node
+ var pad *Node
+ var dot *Node
+ var as *Node
+ var l *NodeList
+ var args *NodeList
+ var in *NodeList
+ var out *NodeList
+ var tpad *Type
+ var methodrcvr *Type
+ var isddd int
+ var v Val
+
+ if false && Debug['r'] != 0 {
+ fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", Tconv(rcvr, 0), Tconv(method, 0), Sconv(newnam, 0))
+ }
+
+ lexlineno++
+ lineno = lexlineno
+ if genwrapper_linehistdone == 0 {
+ // All the wrappers can share the same linehist entry.
+ linehist("<autogenerated>", 0, 0)
+
+ genwrapper_linehistdone = 1
+ }
+
+ dclcontext = PEXTERN
+ markdcl()
+
+ this = Nod(ODCLFIELD, newname(Lookup(".this")), typenod(rcvr))
+ this.Left.Ntype = this.Right
+ in = structargs(getinarg(method.Type), 1)
+ out = structargs(Getoutarg(method.Type), 0)
+
+ t = Nod(OTFUNC, nil, nil)
+ l = list1(this)
+ if iface != 0 && rcvr.Width < Types[Tptr].Width {
+		// Building a method for the interface table when the
+		// receiver is smaller than the single pointer-sized word
+		// that the interface call will pass in.
+		// Add a dummy padding argument after the
+		// receiver to make up the difference.
+ tpad = typ(TARRAY)
+
+ tpad.Type = Types[TUINT8]
+ tpad.Bound = Types[Tptr].Width - rcvr.Width
+ pad = Nod(ODCLFIELD, newname(Lookup(".pad")), typenod(tpad))
+ l = list(l, pad)
+ }
+
+ t.List = concat(l, in)
+ t.Rlist = out
+
+ fn = Nod(ODCLFUNC, nil, nil)
+ fn.Nname = newname(newnam)
+ fn.Nname.Defn = fn
+ fn.Nname.Ntype = t
+ declare(fn.Nname, PFUNC)
+ funchdr(fn)
+
+ // arg list
+ args = nil
+
+ isddd = 0
+ for l = in; l != nil; l = l.Next {
+ args = list(args, l.N.Left)
+ isddd = int(l.N.Left.Isddd)
+ }
+
+ methodrcvr = getthisx(method.Type).Type.Type
+
+ // generate nil pointer check for better error
+ if Isptr[rcvr.Etype] != 0 && rcvr.Type == methodrcvr {
+ // generating wrapper from *T to T.
+ n = Nod(OIF, nil, nil)
+
+ n.Ntest = Nod(OEQ, this.Left, nodnil())
+
+ // these strings are already in the reflect tables,
+ // so no space cost to use them here.
+ l = nil
+
+ v.Ctype = CTSTR
+ v.U.Sval = newstrlit(rcvr.Type.Sym.Pkg.Name) // package name
+ l = list(l, nodlit(v))
+ v.U.Sval = newstrlit(rcvr.Type.Sym.Name) // type name
+ l = list(l, nodlit(v))
+		v.U.Sval = newstrlit(method.Sym.Name) // method name
+		l = list(l, nodlit(v))
+ call = Nod(OCALL, syslook("panicwrap", 0), nil)
+ call.List = l
+ n.Nbody = list1(call)
+ fn.Nbody = list(fn.Nbody, n)
+ }
+
+ dot = adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
+
+ // generate call
+ if !(flag_race != 0) && Isptr[rcvr.Etype] != 0 && Isptr[methodrcvr.Etype] != 0 && method.Embedded != 0 && !(isifacemethod(method.Type) != 0) {
+ // generate tail call: adjust pointer receiver and jump to embedded method.
+ dot = dot.Left // skip final .M
+ if !(Isptr[dotlist[0].field.Type.Etype] != 0) {
+ dot = Nod(OADDR, dot, nil)
+ }
+ as = Nod(OAS, this.Left, Nod(OCONVNOP, dot, nil))
+ as.Right.Type = rcvr
+ fn.Nbody = list(fn.Nbody, as)
+ n = Nod(ORETJMP, nil, nil)
+ n.Left = newname(methodsym(method.Sym, methodrcvr, 0))
+ fn.Nbody = list(fn.Nbody, n)
+ } else {
+ fn.Wrapper = 1 // ignore frame for panic+recover matching
+ call = Nod(OCALL, dot, nil)
+ call.List = args
+ call.Isddd = uint8(isddd)
+ if method.Type.Outtuple > 0 {
+ n = Nod(ORETURN, nil, nil)
+ n.List = list1(call)
+ call = n
+ }
+
+ fn.Nbody = list(fn.Nbody, call)
+ }
+
+ if false && Debug['r'] != 0 {
+ dumplist("genwrapper body", fn.Nbody)
+ }
+
+ funcbody(fn)
+ Curfn = fn
+
+ // wrappers where T is anonymous (struct or interface) can be duplicated.
+ if rcvr.Etype == TSTRUCT || rcvr.Etype == TINTER || Isptr[rcvr.Etype] != 0 && rcvr.Type.Etype == TSTRUCT {
+ fn.Dupok = 1
+ }
+ typecheck(&fn, Etop)
+ typechecklist(fn.Nbody, Etop)
+
+ // Set inl_nonlocal to whether we are calling a method on a
+ // type defined in a different package. Checked in inlvar.
+ if !(methodrcvr.Local != 0) {
+ inl_nonlocal = 1
+ }
+
+ inlcalls(fn)
+
+ inl_nonlocal = 0
+
+ Curfn = nil
+ funccompile(fn)
+}
+
+func hashmem(t *Type) *Node {
+ var tfn *Node
+ var n *Node
+ var sym *Sym
+
+ sym = Pkglookup("memhash", Runtimepkg)
+
+ n = newname(sym)
+ n.Class = PFUNC
+ tfn = Nod(OTFUNC, nil, nil)
+ tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+ tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ tfn.Rlist = list(tfn.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ typecheck(&tfn, Etype)
+ n.Type = tfn.Type
+ return n
+}
+
+func hashfor(t *Type) *Node {
+ var a int
+ var sym *Sym
+ var tfn *Node
+ var n *Node
+
+ a = algtype1(t, nil)
+ switch a {
+ case AMEM:
+ Fatal("hashfor with AMEM type")
+ fallthrough
+
+ case AINTER:
+ sym = Pkglookup("interhash", Runtimepkg)
+
+ case ANILINTER:
+ sym = Pkglookup("nilinterhash", Runtimepkg)
+
+ case ASTRING:
+ sym = Pkglookup("strhash", Runtimepkg)
+
+ case AFLOAT32:
+ sym = Pkglookup("f32hash", Runtimepkg)
+
+ case AFLOAT64:
+ sym = Pkglookup("f64hash", Runtimepkg)
+
+ case ACPLX64:
+ sym = Pkglookup("c64hash", Runtimepkg)
+
+ case ACPLX128:
+ sym = Pkglookup("c128hash", Runtimepkg)
+
+ default:
+ sym = typesymprefix(".hash", t)
+ }
+
+ n = newname(sym)
+ n.Class = PFUNC
+ tfn = Nod(OTFUNC, nil, nil)
+ tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+ tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ tfn.Rlist = list(tfn.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ typecheck(&tfn, Etype)
+ n.Type = tfn.Type
+ return n
+}
+
+/*
+ * Generate a helper function to compute the hash of a value of type t.
+ */
+func genhash(sym *Sym, t *Type) {
+ var n *Node
+ var fn *Node
+ var np *Node
+ var nh *Node
+ var ni *Node
+ var call *Node
+ var nx *Node
+ var na *Node
+ var tfn *Node
+ var r *Node
+ var hashel *Node
+ var first *Type
+ var t1 *Type
+ var old_safemode int
+ var size int64
+ var mul int64
+ var offend int64
+
+ if Debug['r'] != 0 {
+ fmt.Printf("genhash %v %v\n", Sconv(sym, 0), Tconv(t, 0))
+ }
+
+ lineno = 1 // less confusing than end of input
+ dclcontext = PEXTERN
+ markdcl()
+
+ // func sym(p *T, h uintptr) uintptr
+ fn = Nod(ODCLFUNC, nil, nil)
+
+ fn.Nname = newname(sym)
+ fn.Nname.Class = PFUNC
+ tfn = Nod(OTFUNC, nil, nil)
+ fn.Nname.Ntype = tfn
+
+ n = Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
+ tfn.List = list(tfn.List, n)
+ np = n.Left
+ n = Nod(ODCLFIELD, newname(Lookup("h")), typenod(Types[TUINTPTR]))
+ tfn.List = list(tfn.List, n)
+ nh = n.Left
+ n = Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])) // return value
+ tfn.Rlist = list(tfn.Rlist, n)
+
+ funchdr(fn)
+ typecheck(&fn.Nname.Ntype, Etype)
+
+ // genhash is only called for types that have equality but
+ // cannot be handled by the standard algorithms,
+ // so t must be either an array or a struct.
+ switch t.Etype {
+ default:
+ Fatal("genhash %v", Tconv(t, 0))
+ fallthrough
+
+ case TARRAY:
+ if Isslice(t) != 0 {
+ Fatal("genhash %v", Tconv(t, 0))
+ }
+
+ // An array of pure memory would be handled by the
+ // standard algorithm, so the element type must not be
+ // pure memory.
+ hashel = hashfor(t.Type)
+
+ n = Nod(ORANGE, nil, Nod(OIND, np, nil))
+ ni = newname(Lookup("i"))
+ ni.Type = Types[TINT]
+ n.List = list1(ni)
+ n.Colas = 1
+ colasdefn(n.List, n)
+ ni = n.List.N
+
+ // TODO: with aeshash we don't need these shift/mul parts
+
+ // h = h<<3 | h>>61
+ n.Nbody = list(n.Nbody, Nod(OAS, nh, Nod(OOR, Nod(OLSH, nh, Nodintconst(3)), Nod(ORSH, nh, Nodintconst(int64(Widthptr)*8-3)))))
+
+ // h *= mul
+ // Same multipliers as in runtime.memhash.
+ if Widthptr == 4 {
+ mul = 3267000013
+ } else {
+ mul = 23344194077549503
+ }
+ n.Nbody = list(n.Nbody, Nod(OAS, nh, Nod(OMUL, nh, Nodintconst(mul))))
+
+ // h = hashel(&p[i], h)
+ call = Nod(OCALL, hashel, nil)
+
+ nx = Nod(OINDEX, np, ni)
+ nx.Bounded = 1
+ na = Nod(OADDR, nx, nil)
+ na.Etype = 1 // no escape to heap
+ call.List = list(call.List, na)
+ call.List = list(call.List, nh)
+ n.Nbody = list(n.Nbody, Nod(OAS, nh, call))
+
+ fn.Nbody = list(fn.Nbody, n)
+
+ // Walk the struct using memhash for runs of AMEM
+ // and calling specific hash functions for the others.
+ case TSTRUCT:
+ first = nil
+
+ offend = 0
+ for t1 = t.Type; ; t1 = t1.Down {
+ if t1 != nil && algtype1(t1.Type, nil) == AMEM && !isblanksym(t1.Sym) {
+ offend = t1.Width + t1.Type.Width
+ if first == nil {
+ first = t1
+ }
+
+ // If it's a memory field but it's padded, stop here.
+ if ispaddedfield(t1, t.Width) != 0 {
+ t1 = t1.Down
+ } else {
+ continue
+ }
+ }
+
+ // Run memhash for fields up to this one.
+ if first != nil {
+				size = offend - first.Width // first.Width is the field's offset
+ hashel = hashmem(first.Type)
+
+ // h = hashel(&p.first, size, h)
+ call = Nod(OCALL, hashel, nil)
+
+ nx = Nod(OXDOT, np, newname(first.Sym)) // TODO: fields from other packages?
+ na = Nod(OADDR, nx, nil)
+ na.Etype = 1 // no escape to heap
+ call.List = list(call.List, na)
+ call.List = list(call.List, nh)
+ call.List = list(call.List, Nodintconst(size))
+ fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+
+ first = nil
+ }
+
+ if t1 == nil {
+ break
+ }
+ if isblanksym(t1.Sym) {
+ continue
+ }
+
+ // Run hash for this field.
+ if algtype1(t1.Type, nil) == AMEM {
+ hashel = hashmem(t1.Type)
+
+ // h = memhash(&p.t1, h, size)
+ call = Nod(OCALL, hashel, nil)
+
+ nx = Nod(OXDOT, np, newname(t1.Sym)) // TODO: fields from other packages?
+ na = Nod(OADDR, nx, nil)
+ na.Etype = 1 // no escape to heap
+ call.List = list(call.List, na)
+ call.List = list(call.List, nh)
+ call.List = list(call.List, Nodintconst(t1.Type.Width))
+ fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+ } else {
+ hashel = hashfor(t1.Type)
+
+ // h = hashel(&p.t1, h)
+ call = Nod(OCALL, hashel, nil)
+
+ nx = Nod(OXDOT, np, newname(t1.Sym)) // TODO: fields from other packages?
+ na = Nod(OADDR, nx, nil)
+ na.Etype = 1 // no escape to heap
+ call.List = list(call.List, na)
+ call.List = list(call.List, nh)
+ fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+ }
+ }
+ }
+
+ r = Nod(ORETURN, nil, nil)
+ r.List = list(r.List, nh)
+ fn.Nbody = list(fn.Nbody, r)
+
+ if Debug['r'] != 0 {
+ dumplist("genhash body", fn.Nbody)
+ }
+
+ funcbody(fn)
+ Curfn = fn
+ fn.Dupok = 1
+ typecheck(&fn, Etop)
+ typechecklist(fn.Nbody, Etop)
+ Curfn = nil
+
+ // Disable safemode while compiling this code: the code we
+ // generate internally can refer to unsafe.Pointer.
+ // In this case it can happen if we need to generate an ==
+ // for a struct containing a reflect.Value, which itself has
+ // an unexported field of type unsafe.Pointer.
+ old_safemode = safemode
+
+ safemode = 0
+ funccompile(fn)
+ safemode = old_safemode
+}
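+
+// For a struct such as (illustrative)
+//
+//	type T struct {
+//		a, b, c int64
+//		s       string
+//	}
+//
+// the generated hash function looks roughly like
+//
+//	func hash(p *T, h uintptr) uintptr {
+//		h = memhash(&p.a, h, 24) // run of plain memory
+//		h = strhash(&p.s, h)     // field with its own algorithm
+//		return h
+//	}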
+
+// Return node for
+// if p.field != q.field { return false }
+func eqfield(p *Node, q *Node, field *Node) *Node {
+ var nif *Node
+ var nx *Node
+ var ny *Node
+ var r *Node
+
+ nx = Nod(OXDOT, p, field)
+ ny = Nod(OXDOT, q, field)
+ nif = Nod(OIF, nil, nil)
+ nif.Ntest = Nod(ONE, nx, ny)
+ r = Nod(ORETURN, nil, nil)
+ r.List = list(r.List, Nodbool(0))
+ nif.Nbody = list(nif.Nbody, r)
+ return nif
+}
+
+func eqmemfunc(size int64, type_ *Type, needsize *int) *Node {
+ var buf string
+ var fn *Node
+
+ switch size {
+ default:
+ fn = syslook("memequal", 1)
+ *needsize = 1
+
+ case 1,
+ 2,
+ 4,
+ 8,
+ 16:
+ buf = fmt.Sprintf("memequal%d", int(size)*8)
+ fn = syslook(buf, 1)
+ *needsize = 0
+ }
+
+ argtype(fn, type_)
+ argtype(fn, type_)
+ return fn
+}
+
+// Return node for
+// if !memequal(&p.field, &q.field [, size]) { return false }
+func eqmem(p *Node, q *Node, field *Node, size int64) *Node {
+ var nif *Node
+ var nx *Node
+ var ny *Node
+ var call *Node
+ var r *Node
+ var needsize int
+
+ nx = Nod(OADDR, Nod(OXDOT, p, field), nil)
+ nx.Etype = 1 // does not escape
+ ny = Nod(OADDR, Nod(OXDOT, q, field), nil)
+ ny.Etype = 1 // does not escape
+ typecheck(&nx, Erv)
+ typecheck(&ny, Erv)
+
+ call = Nod(OCALL, eqmemfunc(size, nx.Type.Type, &needsize), nil)
+ call.List = list(call.List, nx)
+ call.List = list(call.List, ny)
+ if needsize != 0 {
+ call.List = list(call.List, Nodintconst(size))
+ }
+
+ nif = Nod(OIF, nil, nil)
+ nif.Ninit = list(nif.Ninit, call)
+ nif.Ntest = Nod(ONOT, call, nil)
+ r = Nod(ORETURN, nil, nil)
+ r.List = list(r.List, Nodbool(0))
+ nif.Nbody = list(nif.Nbody, r)
+ return nif
+}
+
+/*
+ * Generate a helper function to check equality of two values of type t.
+ */
+func geneq(sym *Sym, t *Type) {
+ var n *Node
+ var fn *Node
+ var np *Node
+ var nq *Node
+ var tfn *Node
+ var nif *Node
+ var ni *Node
+ var nx *Node
+ var ny *Node
+ var nrange *Node
+ var r *Node
+ var t1 *Type
+ var first *Type
+ var old_safemode int
+ var size int64
+ var offend int64
+
+ if Debug['r'] != 0 {
+ fmt.Printf("geneq %v %v\n", Sconv(sym, 0), Tconv(t, 0))
+ }
+
+ lineno = 1 // less confusing than end of input
+ dclcontext = PEXTERN
+ markdcl()
+
+ // func sym(p, q *T) bool
+ fn = Nod(ODCLFUNC, nil, nil)
+
+ fn.Nname = newname(sym)
+ fn.Nname.Class = PFUNC
+ tfn = Nod(OTFUNC, nil, nil)
+ fn.Nname.Ntype = tfn
+
+ n = Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
+ tfn.List = list(tfn.List, n)
+ np = n.Left
+ n = Nod(ODCLFIELD, newname(Lookup("q")), typenod(Ptrto(t)))
+ tfn.List = list(tfn.List, n)
+ nq = n.Left
+ n = Nod(ODCLFIELD, nil, typenod(Types[TBOOL]))
+ tfn.Rlist = list(tfn.Rlist, n)
+
+ funchdr(fn)
+
+ // geneq is only called for types that have equality but
+ // cannot be handled by the standard algorithms,
+ // so t must be either an array or a struct.
+ switch t.Etype {
+ default:
+ Fatal("geneq %v", Tconv(t, 0))
+ fallthrough
+
+ case TARRAY:
+ if Isslice(t) != 0 {
+ Fatal("geneq %v", Tconv(t, 0))
+ }
+
+ // An array of pure memory would be handled by the
+ // standard memequal, so the element type must not be
+ // pure memory. Even if we unrolled the range loop,
+ // each iteration would be a function call, so don't bother
+ // unrolling.
+ nrange = Nod(ORANGE, nil, Nod(OIND, np, nil))
+
+ ni = newname(Lookup("i"))
+ ni.Type = Types[TINT]
+ nrange.List = list1(ni)
+ nrange.Colas = 1
+ colasdefn(nrange.List, nrange)
+ ni = nrange.List.N
+
+ // if p[i] != q[i] { return false }
+ nx = Nod(OINDEX, np, ni)
+
+ nx.Bounded = 1
+ ny = Nod(OINDEX, nq, ni)
+ ny.Bounded = 1
+
+ nif = Nod(OIF, nil, nil)
+ nif.Ntest = Nod(ONE, nx, ny)
+ r = Nod(ORETURN, nil, nil)
+ r.List = list(r.List, Nodbool(0))
+ nif.Nbody = list(nif.Nbody, r)
+ nrange.Nbody = list(nrange.Nbody, nif)
+ fn.Nbody = list(fn.Nbody, nrange)
+
+ // Walk the struct using memequal for runs of AMEM
+ // and calling specific equality tests for the others.
+ // Skip blank-named fields.
+ case TSTRUCT:
+ first = nil
+
+ offend = 0
+ for t1 = t.Type; ; t1 = t1.Down {
+ if t1 != nil && algtype1(t1.Type, nil) == AMEM && !isblanksym(t1.Sym) {
+ offend = t1.Width + t1.Type.Width
+ if first == nil {
+ first = t1
+ }
+
+ // If it's a memory field but it's padded, stop here.
+ if ispaddedfield(t1, t.Width) != 0 {
+ t1 = t1.Down
+ } else {
+ continue
+ }
+ }
+
+ // Run memequal for fields up to this one.
+ // TODO(rsc): All the calls to newname are wrong for
+ // cross-package unexported fields.
+ if first != nil {
+ if first.Down == t1 {
+ fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+ } else if first.Down.Down == t1 {
+ fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+ first = first.Down
+ if !isblanksym(first.Sym) {
+ fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+ }
+ } else {
+ // More than two fields: use memequal.
+					size = offend - first.Width // first.Width is the field's offset
+ fn.Nbody = list(fn.Nbody, eqmem(np, nq, newname(first.Sym), size))
+ }
+
+ first = nil
+ }
+
+ if t1 == nil {
+ break
+ }
+ if isblanksym(t1.Sym) {
+ continue
+ }
+
+ // Check this field, which is not just memory.
+ fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(t1.Sym)))
+ }
+ }
+
+ // return true
+ r = Nod(ORETURN, nil, nil)
+
+ r.List = list(r.List, Nodbool(1))
+ fn.Nbody = list(fn.Nbody, r)
+
+ if Debug['r'] != 0 {
+ dumplist("geneq body", fn.Nbody)
+ }
+
+ funcbody(fn)
+ Curfn = fn
+ fn.Dupok = 1
+ typecheck(&fn, Etop)
+ typechecklist(fn.Nbody, Etop)
+ Curfn = nil
+
+ // Disable safemode while compiling this code: the code we
+ // generate internally can refer to unsafe.Pointer.
+ // In this case it can happen if we need to generate an ==
+ // for a struct containing a reflect.Value, which itself has
+ // an unexported field of type unsafe.Pointer.
+ old_safemode = safemode
+
+ safemode = 0
+ funccompile(fn)
+ safemode = old_safemode
+}
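+
+// For the same illustrative struct, the generated equality function
+// looks roughly like
+//
+//	func eq(p, q *T) bool {
+//		if !memequal(&p.a, &q.a, 24) { // run of plain memory
+//			return false
+//		}
+//		if p.s != q.s { // field-by-field otherwise
+//			return false
+//		}
+//		return true
+//	}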
+
+func ifacelookdot(s *Sym, t *Type, followptr *int, ignorecase int) *Type {
+ var i int
+ var c int
+ var d int
+ var m *Type
+
+ *followptr = 0
+
+ if t == nil {
+ return nil
+ }
+
+ for d = 0; d < len(dotlist); d++ {
+ c = adddot1(s, t, d, &m, ignorecase)
+ if c > 1 {
+ Yyerror("%v.%v is ambiguous", Tconv(t, 0), Sconv(s, 0))
+ return nil
+ }
+
+ if c == 1 {
+ for i = 0; i < d; i++ {
+ if Isptr[dotlist[i].field.Type.Etype] != 0 {
+ *followptr = 1
+ break
+ }
+ }
+
+ if m.Type.Etype != TFUNC || m.Type.Thistuple == 0 {
+ Yyerror("%v.%v is a field, not a method", Tconv(t, 0), Sconv(s, 0))
+ return nil
+ }
+
+ return m
+ }
+ }
+
+ return nil
+}
+
+func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) int {
+ var t0 *Type
+ var im *Type
+ var tm *Type
+ var rcvr *Type
+ var imtype *Type
+ var followptr int
+
+ t0 = t
+ if t == nil {
+ return 0
+ }
+
+ // if this is too slow,
+ // could sort these first
+ // and then do one loop.
+
+ if t.Etype == TINTER {
+ for im = iface.Type; im != nil; im = im.Down {
+ for tm = t.Type; tm != nil; tm = tm.Down {
+ if tm.Sym == im.Sym {
+ if Eqtype(tm.Type, im.Type) {
+ goto found
+ }
+ *m = im
+ *samename = tm
+ *ptr = 0
+ return 0
+ }
+ }
+
+ *m = im
+ *samename = nil
+ *ptr = 0
+ return 0
+ found:
+ }
+
+ return 1
+ }
+
+ t = methtype(t, 0)
+ if t != nil {
+ expandmeth(t)
+ }
+ for im = iface.Type; im != nil; im = im.Down {
+ imtype = methodfunc(im.Type, nil)
+ tm = ifacelookdot(im.Sym, t, &followptr, 0)
+ if tm == nil || tm.Nointerface != 0 || !Eqtype(methodfunc(tm.Type, nil), imtype) {
+ if tm == nil {
+ tm = ifacelookdot(im.Sym, t, &followptr, 1)
+ }
+ *m = im
+ *samename = tm
+ *ptr = 0
+ return 0
+ }
+
+ // if pointer receiver in method,
+ // the method does not exist for value types.
+ rcvr = getthisx(tm.Type).Type.Type
+
+ if Isptr[rcvr.Etype] != 0 && !(Isptr[t0.Etype] != 0) && !(followptr != 0) && !(isifacemethod(tm.Type) != 0) {
+ if false && Debug['r'] != 0 {
+ Yyerror("interface pointer mismatch")
+ }
+
+ *m = im
+ *samename = nil
+ *ptr = 1
+ return 0
+ }
+ }
+
+ return 1
+}
+
+/*
+ * even simpler simtype; get rid of ptr, bool.
+ * assuming that the front end has rejected
+ * all the invalid conversions (like ptr -> bool)
+ */
+func Simsimtype(t *Type) int {
+ var et int
+
+ if t == nil {
+ return 0
+ }
+
+ et = int(Simtype[t.Etype])
+ switch et {
+ case TPTR32:
+ et = TUINT32
+
+ case TPTR64:
+ et = TUINT64
+
+ case TBOOL:
+ et = TUINT8
+ }
+
+ return et
+}
+
+func concat(a *NodeList, b *NodeList) *NodeList {
+ if a == nil {
+ return b
+ }
+ if b == nil {
+ return a
+ }
+
+ a.End.Next = b
+ a.End = b.End
+ b.End = nil
+ return a
+}
+
+func list1(n *Node) *NodeList {
+ var l *NodeList
+
+ if n == nil {
+ return nil
+ }
+ if n.Op == OBLOCK && n.Ninit == nil {
+ // Flatten list and steal storage.
+ // Poison pointer to catch errant uses.
+ l = n.List
+
+ n.List = nil
+ return l
+ }
+
+ l = new(NodeList)
+ l.N = n
+ l.End = l
+ return l
+}
+
+func list(l *NodeList, n *Node) *NodeList {
+ return concat(l, list1(n))
+}
+
+func listsort(l **NodeList, f func(*Node, *Node) int) {
+ var l1 *NodeList
+ var l2 *NodeList
+ var le *NodeList
+
+ if *l == nil || (*l).Next == nil {
+ return
+ }
+
+ l1 = *l
+ l2 = *l
+ for {
+ l2 = l2.Next
+ if l2 == nil {
+ break
+ }
+ l2 = l2.Next
+ if l2 == nil {
+ break
+ }
+ l1 = l1.Next
+ }
+
+ l2 = l1.Next
+ l1.Next = nil
+ l2.End = (*l).End
+ (*l).End = l1
+
+ l1 = *l
+ listsort(&l1, f)
+ listsort(&l2, f)
+
+ if f(l1.N, l2.N) < 0 {
+ *l = l1
+ } else {
+ *l = l2
+ l2 = l1
+ l1 = *l
+ }
+
+ // now l1 == *l; and l1 < l2
+
+ for (l1 != nil) && (l2 != nil) {
+ for (l1.Next != nil) && f(l1.Next.N, l2.N) < 0 {
+ l1 = l1.Next
+ }
+
+ // l1 is last one from l1 that is < l2
+ le = l1.Next // le is the rest of l1, first one that is >= l2
+ if le != nil {
+ le.End = (*l).End
+ }
+
+ (*l).End = l1 // cut *l at l1
+ *l = concat(*l, l2) // glue l2 to *l's tail
+
+ l1 = l2 // l1 is the first element of *l that is < the new l2
+ l2 = le // ... because l2 now is the old tail of l1
+ }
+
+ *l = concat(*l, l2) // any remainder
+}
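+
+// A typical use (illustrative): ordering statements by line number
+// with a comparison callback:
+//
+//	listsort(&l, func(a, b *Node) int { return int(a.Lineno - b.Lineno) })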
+
+func listtreecopy(l *NodeList) *NodeList {
+ var out *NodeList
+
+ out = nil
+ for ; l != nil; l = l.Next {
+ out = list(out, treecopy(l.N))
+ }
+ return out
+}
+
+func liststmt(l *NodeList) *Node {
+ var n *Node
+
+ n = Nod(OBLOCK, nil, nil)
+ n.List = l
+ if l != nil {
+ n.Lineno = l.N.Lineno
+ }
+ return n
+}
+
+/*
+ * return nelem of list
+ */
+func count(l *NodeList) int {
+ var n int64
+
+ n = 0
+ for ; l != nil; l = l.Next {
+ n++
+ }
+ if int64(int(n)) != n { // Overflow.
+ Yyerror("too many elements in list")
+ }
+
+ return int(n)
+}
+
+/*
+ * return the number of fields in the struct type
+ */
+func structcount(t *Type) int {
+ var v int
+ var s Iter
+
+ v = 0
+ for t = Structfirst(&s, &t); t != nil; t = structnext(&s) {
+ v++
+ }
+ return v
+}
+
+/*
+ * return the base-2 log of the constant
+ * operand. -1 if it is not a power of 2.
+ * 1000+log2(-v) if it is the negation of a power of 2.
+ */
+func powtwo(n *Node) int {
+ var v uint64
+ var b uint64
+ var i int
+
+ if n == nil || n.Op != OLITERAL || n.Type == nil {
+ goto no
+ }
+ if !(Isint[n.Type.Etype] != 0) {
+ goto no
+ }
+
+ v = uint64(Mpgetfix(n.Val.U.Xval))
+ b = 1
+ for i = 0; i < 64; i++ {
+ if b == v {
+ return i
+ }
+ b = b << 1
+ }
+
+ if !(Issigned[n.Type.Etype] != 0) {
+ goto no
+ }
+
+ v = -v
+ b = 1
+ for i = 0; i < 64; i++ {
+ if b == v {
+ return i + 1000
+ }
+ b = b << 1
+ }
+
+no:
+ return -1
+}
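+
+// For example, powtwo returns 3 for a constant 8, 1003 for a signed
+// constant -8, and -1 for a constant 6 or any non-integer operand.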
+
+/*
+ * return the unsigned type for
+ * a signed integer type.
+ * returns nil if input is not a
+ * signed integer type.
+ */
+func tounsigned(t *Type) *Type {
+ // this is types[et+1], but not sure
+ // that this relation is immutable
+ switch t.Etype {
+ default:
+ fmt.Printf("tounsigned: unknown type %v\n", Tconv(t, 0))
+ t = nil
+
+ case TINT:
+ t = Types[TUINT]
+
+ case TINT8:
+ t = Types[TUINT8]
+
+ case TINT16:
+ t = Types[TUINT16]
+
+ case TINT32:
+ t = Types[TUINT32]
+
+ case TINT64:
+ t = Types[TUINT64]
+ }
+
+ return t
+}
+
+/*
+ * magic number for signed division
+ * see hacker's delight chapter 10
+ */
+func Smagic(m *Magic) {
+ var p int
+ var ad uint64
+ var anc uint64
+ var delta uint64
+ var q1 uint64
+ var r1 uint64
+ var q2 uint64
+ var r2 uint64
+ var t uint64
+ var mask uint64
+ var two31 uint64
+
+ m.Bad = 0
+ switch m.W {
+ default:
+ m.Bad = 1
+ return
+
+ case 8:
+ mask = 0xff
+
+ case 16:
+ mask = 0xffff
+
+ case 32:
+ mask = 0xffffffff
+
+ case 64:
+ mask = 0xffffffffffffffff
+ }
+
+ two31 = mask ^ (mask >> 1)
+
+ p = m.W - 1
+ ad = uint64(m.Sd)
+ if m.Sd < 0 {
+ ad = -uint64(m.Sd)
+ }
+
+ // bad denominators
+ if ad == 0 || ad == 1 || ad == two31 {
+ m.Bad = 1
+ return
+ }
+
+ t = two31
+ ad &= mask
+
+ anc = t - 1 - t%ad
+ anc &= mask
+
+ q1 = two31 / anc
+ r1 = two31 - q1*anc
+ q1 &= mask
+ r1 &= mask
+
+ q2 = two31 / ad
+ r2 = two31 - q2*ad
+ q2 &= mask
+ r2 &= mask
+
+ for {
+ p++
+ q1 <<= 1
+ r1 <<= 1
+ q1 &= mask
+ r1 &= mask
+ if r1 >= anc {
+ q1++
+ r1 -= anc
+ q1 &= mask
+ r1 &= mask
+ }
+
+ q2 <<= 1
+ r2 <<= 1
+ q2 &= mask
+ r2 &= mask
+ if r2 >= ad {
+ q2++
+ r2 -= ad
+ q2 &= mask
+ r2 &= mask
+ }
+
+ delta = ad - r2
+ delta &= mask
+ if q1 < delta || (q1 == delta && r1 == 0) {
+ continue
+ }
+
+ break
+ }
+
+ m.Sm = int64(q2 + 1)
+ if uint64(m.Sm)&two31 != 0 {
+ m.Sm |= ^int64(mask)
+ }
+ m.S = p - m.W
+}
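+
+// divmagics32 is an illustrative sketch (not used by the compiler) of
+// how Smagic's outputs replace a signed division x/d once m has been
+// filled in with m.W = 32 and a positive divisor m.Sd: take the high
+// word of the product, compensate if the magic multiplier overflowed
+// into the sign bit, shift, then add 1 for negative numerators.
+func divmagics32(x int32, m *Magic) int32 {
+	q := int32((int64(int32(m.Sm)) * int64(x)) >> 32) // high word of m.Sm*x
+	if m.Sm < 0 {
+		q += x // multiplier is negative as a 32-bit value; compensate
+	}
+	q >>= uint(m.S)
+	return q - (x >> 31) // x>>31 is -1 for negative x, so this adds 1
+}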
+
+/*
+ * magic number for unsigned division
+ * see hacker's delight chapter 10
+ */
+func Umagic(m *Magic) {
+ var p int
+ var nc uint64
+ var delta uint64
+ var q1 uint64
+ var r1 uint64
+ var q2 uint64
+ var r2 uint64
+ var mask uint64
+ var two31 uint64
+
+ m.Bad = 0
+ m.Ua = 0
+
+ switch m.W {
+ default:
+ m.Bad = 1
+ return
+
+ case 8:
+ mask = 0xff
+
+ case 16:
+ mask = 0xffff
+
+ case 32:
+ mask = 0xffffffff
+
+ case 64:
+ mask = 0xffffffffffffffff
+ }
+
+ two31 = mask ^ (mask >> 1)
+
+ m.Ud &= mask
+ if m.Ud == 0 || m.Ud == two31 {
+ m.Bad = 1
+ return
+ }
+
+ nc = mask - (-m.Ud&mask)%m.Ud
+ p = m.W - 1
+
+ q1 = two31 / nc
+ r1 = two31 - q1*nc
+ q1 &= mask
+ r1 &= mask
+
+ q2 = (two31 - 1) / m.Ud
+ r2 = (two31 - 1) - q2*m.Ud
+ q2 &= mask
+ r2 &= mask
+
+ for {
+ p++
+ if r1 >= nc-r1 {
+ q1 <<= 1
+ q1++
+ r1 <<= 1
+ r1 -= nc
+ } else {
+ q1 <<= 1
+ r1 <<= 1
+ }
+
+ q1 &= mask
+ r1 &= mask
+ if r2+1 >= m.Ud-r2 {
+ if q2 >= two31-1 {
+ m.Ua = 1
+ }
+
+ q2 <<= 1
+ q2++
+ r2 <<= 1
+ r2++
+ r2 -= m.Ud
+ } else {
+ if q2 >= two31 {
+ m.Ua = 1
+ }
+
+ q2 <<= 1
+ r2 <<= 1
+ r2++
+ }
+
+ q2 &= mask
+ r2 &= mask
+
+ delta = m.Ud - 1 - r2
+ delta &= mask
+
+ if p < m.W+m.W {
+ if q1 < delta || (q1 == delta && r1 == 0) {
+ continue
+ }
+ }
+
+ break
+ }
+
+ m.Um = q2 + 1
+ m.S = p - m.W
+}
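+
+// divmagicu32 is the unsigned counterpart, again only an illustrative
+// sketch: t is the high word of the product, and when the magic
+// multiplier overflowed (m.Ua != 0) an extra subtract-halve-add step
+// computes (x+t)>>m.S without needing a 33rd bit; m.S >= 1 then.
+func divmagicu32(x uint32, m *Magic) uint32 {
+	t := uint32((uint64(uint32(m.Um)) * uint64(x)) >> 32) // high word of m.Um*x
+	if m.Ua != 0 {
+		return (t + (x-t)>>1) >> uint(m.S-1)
+	}
+	return t >> uint(m.S)
+}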
+
+func ngotype(n *Node) *Sym {
+ if n.Type != nil {
+ return typenamesym(n.Type)
+ }
+ return nil
+}
+
+/*
+ * Convert raw string to the prefix that will be used in the symbol
+ * table. All control characters, space, '%' and '"', as well as
+ * non-7-bit clean bytes turn into %xx. The period needs escaping
+ * only in the last segment of the path, and it makes for happier
+ * users if we escape that as little as possible.
+ *
+ * If you edit this, edit ../ld/lib.c:/^pathtoprefix too.
+ * If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
+ */
+func pathtoprefix(s string) string {
+ slash := strings.LastIndex(s, "/")
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ goto escape
+ }
+ }
+ return s
+
+escape:
+ var buf bytes.Buffer
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ fmt.Fprintf(&buf, "%%%02x", c)
+ continue
+ }
+ buf.WriteByte(c)
+ }
+ return buf.String()
+}
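+
+// For example, pathtoprefix("github.com/user/my.pkg") yields
+// "github.com/user/my%2epkg": the dots before the last slash are
+// left alone, while the dot in the final segment is escaped.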
+
+func mkpkg(path_ *Strlit) *Pkg {
+ var p *Pkg
+ var h int
+
+ h = int(stringhash(path_.S) & uint32(len(phash)-1))
+ for p = phash[h]; p != nil; p = p.Link {
+ if p.Path.S == path_.S {
+ return p
+ }
+ }
+
+ p = new(Pkg)
+ p.Path = path_
+ p.Prefix = pathtoprefix(path_.S)
+ p.Link = phash[h]
+ phash[h] = p
+ return p
+}
+
+func newstrlit(s string) *Strlit {
+ return &Strlit{
+ S: s,
+ }
+}
+
+func addinit(np **Node, init *NodeList) {
+ var n *Node
+
+ if init == nil {
+ return
+ }
+
+ n = *np
+ switch n.Op {
+ // There may be multiple refs to this node;
+ // introduce OCONVNOP to hold init list.
+ case ONAME,
+ OLITERAL:
+ n = Nod(OCONVNOP, n, nil)
+
+ n.Type = n.Left.Type
+ n.Typecheck = 1
+ *np = n
+ }
+
+ n.Ninit = concat(init, n.Ninit)
+ n.Ullman = UINF
+}
+
+var reservedimports = []string{
+ "go",
+ "type",
+}
+
+func isbadimport(path_ *Strlit) bool {
+ var i int
+
+	if strings.Contains(path_.S, "\x00") {
+ Yyerror("import path contains NUL")
+ return true
+ }
+
+ for i = 0; i < len(reservedimports); i++ {
+ if path_.S == reservedimports[i] {
+ Yyerror("import path \"%s\" is reserved and cannot be used", path_.S)
+ return true
+ }
+ }
+
+ for _, r := range path_.S {
+ if r == utf8.RuneError {
+ Yyerror("import path contains invalid UTF-8 sequence: \"%v\"", Zconv(path_, 0))
+ return true
+ }
+
+ if r < 0x20 || r == 0x7f {
+ Yyerror("import path contains control character: \"%v\"", Zconv(path_, 0))
+ return true
+ }
+
+ if r == '\\' {
+ Yyerror("import path contains backslash; use slash: \"%v\"", Zconv(path_, 0))
+ return true
+ }
+
+ if unicode.IsSpace(rune(r)) {
+ Yyerror("import path contains space character: \"%v\"", Zconv(path_, 0))
+ return true
+ }
+
+ if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
+ Yyerror("import path contains invalid character '%c': \"%v\"", r, Zconv(path_, 0))
+ return true
+ }
+ }
+
+ return false
+}
+
+func checknil(x *Node, init **NodeList) {
+ var n *Node
+
+ if Isinter(x.Type) != 0 {
+ x = Nod(OITAB, x, nil)
+ typecheck(&x, Erv)
+ }
+
+ n = Nod(OCHECKNIL, x, nil)
+ n.Typecheck = 1
+ *init = list(*init, n)
+}
+
+/*
+ * Can this type be stored directly in an interface word?
+ * Yes, if the representation is a single pointer.
+ */
+func isdirectiface(t *Type) int {
+ switch t.Etype {
+ case TPTR32,
+ TPTR64,
+ TCHAN,
+ TMAP,
+ TFUNC,
+ TUNSAFEPTR:
+ return 1
+
+ // Array of 1 direct iface type can be direct.
+ case TARRAY:
+ return bool2int(t.Bound == 1 && isdirectiface(t.Type) != 0)
+
+ // Struct with 1 field of direct iface type can be direct.
+ case TSTRUCT:
+ return bool2int(t.Type != nil && t.Type.Down == nil && isdirectiface(t.Type.Type) != 0)
+ }
+
+ return 0
+}
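+
+// For example, *int, chan int, and struct{ p *int } are all direct
+// (a single pointer in memory), while [2]*int and struct{ a, b *int }
+// are not.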
diff --git a/src/cmd/internal/gc/swt.go b/src/cmd/internal/gc/swt.go
new file mode 100644
index 0000000000..cf1f7d43a6
--- /dev/null
+++ b/src/cmd/internal/gc/swt.go
@@ -0,0 +1,1028 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+const (
+ Snorm = 0 + iota
+ Strue
+ Sfalse
+ Stype
+ Tdefault
+ Texprconst
+ Texprvar
+ Ttypenil
+ Ttypeconst
+ Ttypevar
+ Ncase = 4
+)
+
+type Case struct {
+ node *Node
+ hash uint32
+ type_ uint8
+ diag uint8
+ ordinal uint16
+ link *Case
+}
+
+var C *Case
+
+func dumpcase(c0 *Case) {
+ var c *Case
+
+ for c = c0; c != nil; c = c.link {
+ switch c.type_ {
+ case Tdefault:
+ fmt.Printf("case-default\n")
+ fmt.Printf("\tord=%d\n", c.ordinal)
+
+ case Texprconst:
+ fmt.Printf("case-exprconst\n")
+ fmt.Printf("\tord=%d\n", c.ordinal)
+
+ case Texprvar:
+ fmt.Printf("case-exprvar\n")
+ fmt.Printf("\tord=%d\n", c.ordinal)
+ fmt.Printf("\top=%v\n", Oconv(int(c.node.Left.Op), 0))
+
+ case Ttypenil:
+ fmt.Printf("case-typenil\n")
+ fmt.Printf("\tord=%d\n", c.ordinal)
+
+ case Ttypeconst:
+ fmt.Printf("case-typeconst\n")
+ fmt.Printf("\tord=%d\n", c.ordinal)
+ fmt.Printf("\thash=%x\n", c.hash)
+
+ case Ttypevar:
+ fmt.Printf("case-typevar\n")
+ fmt.Printf("\tord=%d\n", c.ordinal)
+
+ default:
+ fmt.Printf("case-???\n")
+ fmt.Printf("\tord=%d\n", c.ordinal)
+ fmt.Printf("\top=%v\n", Oconv(int(c.node.Left.Op), 0))
+ fmt.Printf("\thash=%x\n", c.hash)
+ }
+ }
+
+ fmt.Printf("\n")
+}
+
+func ordlcmp(c1 *Case, c2 *Case) int {
+ // sort default first
+ if c1.type_ == Tdefault {
+ return -1
+ }
+ if c2.type_ == Tdefault {
+ return +1
+ }
+
+ // sort nil second
+ if c1.type_ == Ttypenil {
+ return -1
+ }
+ if c2.type_ == Ttypenil {
+ return +1
+ }
+
+ // sort by ordinal
+ if c1.ordinal > c2.ordinal {
+ return +1
+ }
+ if c1.ordinal < c2.ordinal {
+ return -1
+ }
+ return 0
+}
+
+func exprcmp(c1 *Case, c2 *Case) int {
+ var ct int
+ var n int
+ var n1 *Node
+ var n2 *Node
+
+ // sort non-constants last
+ if c1.type_ != Texprconst {
+ return +1
+ }
+ if c2.type_ != Texprconst {
+ return -1
+ }
+
+ n1 = c1.node.Left
+ n2 = c2.node.Left
+
+ // sort by type (for switches on interface)
+ ct = int(n1.Val.Ctype)
+
+ if ct != int(n2.Val.Ctype) {
+ return ct - int(n2.Val.Ctype)
+ }
+ if !Eqtype(n1.Type, n2.Type) {
+ if n1.Type.Vargen > n2.Type.Vargen {
+ return +1
+ } else {
+ return -1
+ }
+ }
+
+ // sort by constant value
+ n = 0
+
+ switch ct {
+ case CTFLT:
+ n = mpcmpfltflt(n1.Val.U.Fval, n2.Val.U.Fval)
+
+ case CTINT,
+ CTRUNE:
+ n = Mpcmpfixfix(n1.Val.U.Xval, n2.Val.U.Xval)
+
+ case CTSTR:
+ n = cmpslit(n1, n2)
+ }
+
+ return n
+}
+
+func typecmp(c1 *Case, c2 *Case) int {
+ // sort non-constants last
+ if c1.type_ != Ttypeconst {
+ return +1
+ }
+ if c2.type_ != Ttypeconst {
+ return -1
+ }
+
+ // sort by hash code
+ if c1.hash > c2.hash {
+ return +1
+ }
+ if c1.hash < c2.hash {
+ return -1
+ }
+
+ // sort by ordinal so duplicate error
+ // happens on later case.
+ if c1.ordinal > c2.ordinal {
+ return +1
+ }
+ if c1.ordinal < c2.ordinal {
+ return -1
+ }
+ return 0
+}
+
+func csort(l *Case, f func(*Case, *Case) int) *Case {
+ var l1 *Case
+ var l2 *Case
+ var le *Case
+
+ if l == nil || l.link == nil {
+ return l
+ }
+
+ l1 = l
+ l2 = l
+ for {
+ l2 = l2.link
+ if l2 == nil {
+ break
+ }
+ l2 = l2.link
+ if l2 == nil {
+ break
+ }
+ l1 = l1.link
+ }
+
+ l2 = l1.link
+ l1.link = nil
+ l1 = csort(l, f)
+ l2 = csort(l2, f)
+
+ /* set up lead element */
+ if f(l1, l2) < 0 {
+ l = l1
+ l1 = l1.link
+ } else {
+ l = l2
+ l2 = l2.link
+ }
+
+ le = l
+
+ for {
+ if l1 == nil {
+ for l2 != nil {
+ le.link = l2
+ le = l2
+ l2 = l2.link
+ }
+
+ le.link = nil
+ break
+ }
+
+ if l2 == nil {
+ for l1 != nil {
+ le.link = l1
+ le = l1
+ l1 = l1.link
+ }
+
+ break
+ }
+
+ if f(l1, l2) < 0 {
+ le.link = l1
+ le = l1
+ l1 = l1.link
+ } else {
+ le.link = l2
+ le = l2
+ l2 = l2.link
+ }
+ }
+
+ le.link = nil
+ return l
+}
+
+var newlabel_swt_label int
+
+func newlabel_swt() *Node {
+ newlabel_swt_label++
+ namebuf = fmt.Sprintf("%.6d", newlabel_swt_label)
+ return newname(Lookup(namebuf))
+}
+
+/*
+ * build separate list of statements and cases
+ * make labels between cases and statements
+ * deal with fallthrough, break, unreachable statements
+ */
+func casebody(sw *Node, typeswvar *Node) {
+ var n *Node
+ var c *Node
+ var last *Node
+ var def *Node
+ var cas *NodeList
+ var stat *NodeList
+ var l *NodeList
+ var lc *NodeList
+ var go_ *Node
+ var br *Node
+ var lno int32
+ var needvar int32
+
+ if sw.List == nil {
+ return
+ }
+
+ lno = setlineno(sw)
+
+ cas = nil // cases
+ stat = nil // statements
+ def = nil // defaults
+ br = Nod(OBREAK, nil, nil)
+
+ for l = sw.List; l != nil; l = l.Next {
+ n = l.N
+ setlineno(n)
+ if n.Op != OXCASE {
+ Fatal("casebody %v", Oconv(int(n.Op), 0))
+ }
+ n.Op = OCASE
+ needvar = int32(bool2int(count(n.List) != 1 || n.List.N.Op == OLITERAL))
+
+ go_ = Nod(OGOTO, newlabel_swt(), nil)
+ if n.List == nil {
+ if def != nil {
+ Yyerror("more than one default case")
+ }
+
+ // reuse original default case
+ n.Right = go_
+
+ def = n
+ }
+
+ if n.List != nil && n.List.Next == nil {
+ // one case - reuse OCASE node.
+ c = n.List.N
+
+ n.Left = c
+ n.Right = go_
+ n.List = nil
+ cas = list(cas, n)
+ } else {
+ // expand multi-valued cases
+ for lc = n.List; lc != nil; lc = lc.Next {
+ c = lc.N
+ cas = list(cas, Nod(OCASE, c, go_))
+ }
+ }
+
+ stat = list(stat, Nod(OLABEL, go_.Left, nil))
+ if typeswvar != nil && needvar != 0 && n.Nname != nil {
+ var l *NodeList
+
+ l = list1(Nod(ODCL, n.Nname, nil))
+ l = list(l, Nod(OAS, n.Nname, typeswvar))
+ typechecklist(l, Etop)
+ stat = concat(stat, l)
+ }
+
+ stat = concat(stat, n.Nbody)
+
+ // botch - shouldn't fall thru declaration
+ last = stat.End.N
+
+ if last.Xoffset == n.Xoffset && last.Op == OXFALL {
+ if typeswvar != nil {
+ setlineno(last)
+ Yyerror("cannot fallthrough in type switch")
+ }
+
+ if l.Next == nil {
+ setlineno(last)
+ Yyerror("cannot fallthrough final case in switch")
+ }
+
+ last.Op = OFALL
+ } else {
+ stat = list(stat, br)
+ }
+ }
+
+ stat = list(stat, br)
+ if def != nil {
+ cas = list(cas, def)
+ }
+
+ sw.List = cas
+ sw.Nbody = stat
+ lineno = lno
+}
+
+func mkcaselist(sw *Node, arg int) *Case {
+ var n *Node
+ var c *Case
+ var c1 *Case
+ var c2 *Case
+ var l *NodeList
+ var ord int
+
+ c = nil
+ ord = 0
+
+ for l = sw.List; l != nil; l = l.Next {
+ n = l.N
+ c1 = new(Case)
+ c1.link = c
+ c = c1
+
+ ord++
+ if int(uint16(ord)) != ord {
+ Fatal("too many cases in switch")
+ }
+ c.ordinal = uint16(ord)
+ c.node = n
+
+ if n.Left == nil {
+ c.type_ = Tdefault
+ continue
+ }
+
+ switch arg {
+ case Stype:
+ c.hash = 0
+ if n.Left.Op == OLITERAL {
+ c.type_ = Ttypenil
+ continue
+ }
+
+ if Istype(n.Left.Type, TINTER) != 0 {
+ c.type_ = Ttypevar
+ continue
+ }
+
+ c.hash = typehash(n.Left.Type)
+ c.type_ = Ttypeconst
+ continue
+
+ case Snorm,
+ Strue,
+ Sfalse:
+ c.type_ = Texprvar
+ c.hash = typehash(n.Left.Type)
+ switch consttype(n.Left) {
+ case CTFLT,
+ CTINT,
+ CTRUNE,
+ CTSTR:
+ c.type_ = Texprconst
+ }
+
+ continue
+ }
+ }
+
+ if c == nil {
+ return nil
+ }
+
+ // sort by value and diagnose duplicate cases
+ switch arg {
+ case Stype:
+ c = csort(c, typecmp)
+ for c1 = c; c1 != nil; c1 = c1.link {
+ for c2 = c1.link; c2 != nil && c2.hash == c1.hash; c2 = c2.link {
+ if c1.type_ == Ttypenil || c1.type_ == Tdefault {
+ break
+ }
+ if c2.type_ == Ttypenil || c2.type_ == Tdefault {
+ break
+ }
+ if !Eqtype(c1.node.Left.Type, c2.node.Left.Type) {
+ continue
+ }
+ yyerrorl(int(c2.node.Lineno), "duplicate case %v in type switch\n\tprevious case at %v", Tconv(c2.node.Left.Type, 0), c1.node.Line())
+ }
+ }
+
+ case Snorm,
+ Strue,
+ Sfalse:
+ c = csort(c, exprcmp)
+ for c1 = c; c1.link != nil; c1 = c1.link {
+ if exprcmp(c1, c1.link) != 0 {
+ continue
+ }
+ setlineno(c1.link.node)
+ Yyerror("duplicate case %v in switch\n\tprevious case at %v", Nconv(c1.node.Left, 0), c1.node.Line())
+ }
+ }
+
+ // put list back in processing order
+ c = csort(c, ordlcmp)
+
+ return c
+}
+
+var exprname *Node
+
+func exprbsw(c0 *Case, ncase int, arg int) *Node {
+ var cas *NodeList
+ var a *Node
+ var n *Node
+ var c *Case
+ var i int
+ var half int
+ var lno int
+
+ cas = nil
+ if ncase < Ncase {
+ for i = 0; i < ncase; i++ {
+ n = c0.node
+ lno = int(setlineno(n))
+
+ if (arg != Strue && arg != Sfalse) || assignop(n.Left.Type, exprname.Type, nil) == OCONVIFACE || assignop(exprname.Type, n.Left.Type, nil) == OCONVIFACE {
+ a = Nod(OIF, nil, nil)
+ a.Ntest = Nod(OEQ, exprname, n.Left) // if name == val
+ typecheck(&a.Ntest, Erv)
+ a.Nbody = list1(n.Right) // then goto l
+ } else if arg == Strue {
+ a = Nod(OIF, nil, nil)
+ a.Ntest = n.Left // if val
+				a.Nbody = list1(n.Right) // then goto l
+			} else { // arg == Sfalse
+ a = Nod(OIF, nil, nil)
+ a.Ntest = Nod(ONOT, n.Left, nil) // if !val
+ typecheck(&a.Ntest, Erv)
+ a.Nbody = list1(n.Right) // then goto l
+ }
+
+ cas = list(cas, a)
+ c0 = c0.link
+ lineno = int32(lno)
+ }
+
+ return liststmt(cas)
+ }
+
+ // find the middle and recur
+ c = c0
+
+ half = ncase >> 1
+ for i = 1; i < half; i++ {
+ c = c.link
+ }
+ a = Nod(OIF, nil, nil)
+ a.Ntest = Nod(OLE, exprname, c.node.Left)
+ typecheck(&a.Ntest, Erv)
+ a.Nbody = list1(exprbsw(c0, half, arg))
+ a.Nelse = list1(exprbsw(c.link, ncase-half, arg))
+ return a
+}
+
+/*
+ * normal (expression) switch.
+ * rebuild case statements into if .. goto
+ */
+func exprswitch(sw *Node) {
+ var def *Node
+ var cas *NodeList
+ var a *Node
+ var c0 *Case
+ var c *Case
+ var c1 *Case
+ var t *Type
+ var arg int
+ var ncase int
+
+ casebody(sw, nil)
+
+ arg = Snorm
+ if Isconst(sw.Ntest, CTBOOL) != 0 {
+ arg = Strue
+ if sw.Ntest.Val.U.Bval == 0 {
+ arg = Sfalse
+ }
+ }
+
+ walkexpr(&sw.Ntest, &sw.Ninit)
+ t = sw.Type
+ if t == nil {
+ return
+ }
+
+ /*
+ * convert the switch into OIF statements
+ */
+ exprname = nil
+
+ cas = nil
+ if arg == Strue || arg == Sfalse {
+ exprname = Nodbool(bool2int(arg == Strue))
+ } else if consttype(sw.Ntest) >= 0 {
+ // leave constants to enable dead code elimination (issue 9608)
+ exprname = sw.Ntest
+ } else {
+ exprname = temp(sw.Ntest.Type)
+ cas = list1(Nod(OAS, exprname, sw.Ntest))
+ typechecklist(cas, Etop)
+ }
+
+ c0 = mkcaselist(sw, arg)
+ if c0 != nil && c0.type_ == Tdefault {
+ def = c0.node.Right
+ c0 = c0.link
+ } else {
+ def = Nod(OBREAK, nil, nil)
+ }
+
+loop:
+ if c0 == nil {
+ cas = list(cas, def)
+ sw.Nbody = concat(cas, sw.Nbody)
+ sw.List = nil
+ walkstmtlist(sw.Nbody)
+ return
+ }
+
+ // deal with the variables one-at-a-time
+ if !(okforcmp[t.Etype] != 0) || c0.type_ != Texprconst {
+ a = exprbsw(c0, 1, arg)
+ cas = list(cas, a)
+ c0 = c0.link
+ goto loop
+ }
+
+ // do binary search on run of constants
+ ncase = 1
+
+ for c = c0; c.link != nil; c = c.link {
+ if c.link.type_ != Texprconst {
+ break
+ }
+ ncase++
+ }
+
+ // break the chain at the count
+ c1 = c.link
+
+ c.link = nil
+
+ // sort and compile constants
+ c0 = csort(c0, exprcmp)
+
+ a = exprbsw(c0, ncase, arg)
+ cas = list(cas, a)
+
+ c0 = c1
+ goto loop
+}
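+
+// For example (illustrative), a switch
+//
+//	switch x {
+//	case 1:
+//		f()
+//	default:
+//		g()
+//	}
+//
+// is rebuilt as roughly
+//
+//	tmp := x
+//	if tmp == 1 { goto l1 }
+//	goto ldef
+//	l1: f(); goto brk
+//	ldef: g(); goto brk
+//	brk:
+//
+// with long runs of constant cases turned into a binary search over
+// the sorted values instead of a linear chain of ifs.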
+
+var hashname *Node
+
+var facename *Node
+
+var boolname *Node
+
+func typeone(t *Node) *Node {
+ var init *NodeList
+ var a *Node
+ var b *Node
+ var var_ *Node
+
+ var_ = t.Nname
+ init = nil
+ if var_ == nil {
+ typecheck(&nblank, Erv|Easgn)
+ var_ = nblank
+ } else {
+ init = list1(Nod(ODCL, var_, nil))
+ }
+
+ a = Nod(OAS2, nil, nil)
+ a.List = list(list1(var_), boolname) // var,bool =
+ b = Nod(ODOTTYPE, facename, nil)
+ b.Type = t.Left.Type // interface.(type)
+ a.Rlist = list1(b)
+ typecheck(&a, Etop)
+ init = list(init, a)
+
+ b = Nod(OIF, nil, nil)
+ b.Ntest = boolname
+ b.Nbody = list1(t.Right) // if bool { goto l }
+ a = liststmt(list(init, b))
+ return a
+}
+
+func typebsw(c0 *Case, ncase int) *Node {
+ var cas *NodeList
+ var a *Node
+ var n *Node
+ var c *Case
+ var i int
+ var half int
+
+ cas = nil
+
+ if ncase < Ncase {
+ for i = 0; i < ncase; i++ {
+ n = c0.node
+ if c0.type_ != Ttypeconst {
+ Fatal("typebsw")
+ }
+ a = Nod(OIF, nil, nil)
+ a.Ntest = Nod(OEQ, hashname, Nodintconst(int64(c0.hash)))
+ typecheck(&a.Ntest, Erv)
+ a.Nbody = list1(n.Right)
+ cas = list(cas, a)
+ c0 = c0.link
+ }
+
+ return liststmt(cas)
+ }
+
+ // find the middle and recur
+ c = c0
+
+ half = ncase >> 1
+ for i = 1; i < half; i++ {
+ c = c.link
+ }
+ a = Nod(OIF, nil, nil)
+ a.Ntest = Nod(OLE, hashname, Nodintconst(int64(c.hash)))
+ typecheck(&a.Ntest, Erv)
+ a.Nbody = list1(typebsw(c0, half))
+ a.Nelse = list1(typebsw(c.link, ncase-half))
+ return a
+}
+
+/*
+ * convert switch of the form
+ * switch v := i.(type) { case t1: ..; case t2: ..; }
+ * into if statements
+ */
+func typeswitch(sw *Node) {
+ var def *Node
+ var cas *NodeList
+ var hash *NodeList
+ var a *Node
+ var n *Node
+ var c *Case
+ var c0 *Case
+ var c1 *Case
+ var ncase int
+ var t *Type
+ var v Val
+
+ if sw.Ntest == nil {
+ return
+ }
+ if sw.Ntest.Right == nil {
+ setlineno(sw)
+ Yyerror("type switch must have an assignment")
+ return
+ }
+
+ walkexpr(&sw.Ntest.Right, &sw.Ninit)
+ if !(Istype(sw.Ntest.Right.Type, TINTER) != 0) {
+ Yyerror("type switch must be on an interface")
+ return
+ }
+
+ cas = nil
+
+ /*
+ * predeclare temporary variables
+ * and the boolean var
+ */
+ facename = temp(sw.Ntest.Right.Type)
+
+ a = Nod(OAS, facename, sw.Ntest.Right)
+ typecheck(&a, Etop)
+ cas = list(cas, a)
+
+ casebody(sw, facename)
+
+ boolname = temp(Types[TBOOL])
+ typecheck(&boolname, Erv)
+
+ hashname = temp(Types[TUINT32])
+ typecheck(&hashname, Erv)
+
+ t = sw.Ntest.Right.Type
+ if isnilinter(t) != 0 {
+ a = syslook("efacethash", 1)
+ } else {
+ a = syslook("ifacethash", 1)
+ }
+ argtype(a, t)
+ a = Nod(OCALL, a, nil)
+ a.List = list1(facename)
+ a = Nod(OAS, hashname, a)
+ typecheck(&a, Etop)
+ cas = list(cas, a)
+
+ c0 = mkcaselist(sw, Stype)
+ if c0 != nil && c0.type_ == Tdefault {
+ def = c0.node.Right
+ c0 = c0.link
+ } else {
+ def = Nod(OBREAK, nil, nil)
+ }
+
+ /*
+ * insert if statement into each case block
+ */
+ for c = c0; c != nil; c = c.link {
+ n = c.node
+ switch c.type_ {
+ case Ttypenil:
+ v.Ctype = CTNIL
+ a = Nod(OIF, nil, nil)
+ a.Ntest = Nod(OEQ, facename, nodlit(v))
+ typecheck(&a.Ntest, Erv)
+ a.Nbody = list1(n.Right) // if i==nil { goto l }
+ n.Right = a
+
+ case Ttypevar,
+ Ttypeconst:
+ n.Right = typeone(n)
+ }
+ }
+
+ /*
+ * generate list of if statements, binary search for constant sequences
+ */
+ for c0 != nil {
+ if c0.type_ != Ttypeconst {
+ n = c0.node
+ cas = list(cas, n.Right)
+ c0 = c0.link
+ continue
+ }
+
+ // identify run of constants
+ c = c0
+ c1 = c
+
+ for c.link != nil && c.link.type_ == Ttypeconst {
+ c = c.link
+ }
+ c0 = c.link
+ c.link = nil
+
+ // sort by hash
+ c1 = csort(c1, typecmp)
+
+ // for debugging: linear search
+ if false {
+ for c = c1; c != nil; c = c.link {
+ n = c.node
+ cas = list(cas, n.Right)
+ }
+
+ continue
+ }
+
+ // combine adjacent cases with the same hash
+ ncase = 0
+
+ for c = c1; c != nil; c = c.link {
+ ncase++
+ hash = list1(c.node.Right)
+ for c.link != nil && c.link.hash == c.hash {
+ hash = list(hash, c.link.node.Right)
+ c.link = c.link.link
+ }
+
+ c.node.Right = liststmt(hash)
+ }
+
+ // binary search among cases to narrow by hash
+ cas = list(cas, typebsw(c1, ncase))
+ }
+
+ if nerrors == 0 {
+ cas = list(cas, def)
+ sw.Nbody = concat(cas, sw.Nbody)
+ sw.List = nil
+ walkstmtlist(sw.Nbody)
+ }
+}
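+
+// For example (illustrative), a type switch
+//
+//	switch v := i.(type) {
+//	case int:
+//		use(v)
+//	}
+//
+// becomes roughly
+//
+//	f := i
+//	h := ifacethash(f)
+//	if h == typehash(int) {
+//		v, ok := f.(int)
+//		if ok { goto l1 }
+//	}
+//	goto ldef
+//
+// with runs of constant type hashes narrowed by binary search.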
+
+func walkswitch(sw *Node) {
+ /*
+ * reorder the body into (OLIST, cases, statements)
+ * cases have OGOTO into statements.
+ * both have inserted OBREAK statements
+ */
+ if sw.Ntest == nil {
+ sw.Ntest = Nodbool(1)
+ typecheck(&sw.Ntest, Erv)
+ }
+
+ if sw.Ntest.Op == OTYPESW {
+ typeswitch(sw)
+
+ //dump("sw", sw);
+ return
+ }
+
+ exprswitch(sw)
+
+	// Discard old AST elements after a walk. They can confuse racewalk.
+ sw.Ntest = nil
+
+ sw.List = nil
+}
+
+/*
+ * type check switch statement
+ */
+func typecheckswitch(n *Node) {
+ var top int
+ var lno int
+ var ptr int
+ var nilonly string
+ var t *Type
+ var badtype *Type
+ var missing *Type
+ var have *Type
+ var l *NodeList
+ var ll *NodeList
+ var ncase *Node
+ var nvar *Node
+ var def *Node
+
+ lno = int(lineno)
+ typechecklist(n.Ninit, Etop)
+ nilonly = ""
+
+ if n.Ntest != nil && n.Ntest.Op == OTYPESW {
+ // type switch
+ top = Etype
+
+ typecheck(&n.Ntest.Right, Erv)
+ t = n.Ntest.Right.Type
+ if t != nil && t.Etype != TINTER {
+ Yyerror("cannot type switch on non-interface value %v", Nconv(n.Ntest.Right, obj.FmtLong))
+ }
+ } else {
+ // value switch
+ top = Erv
+
+ if n.Ntest != nil {
+ typecheck(&n.Ntest, Erv)
+ defaultlit(&n.Ntest, nil)
+ t = n.Ntest.Type
+ } else {
+ t = Types[TBOOL]
+ }
+ if t != nil {
+ if !(okforeq[t.Etype] != 0) {
+ Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
+ } else if t.Etype == TARRAY && !(Isfixedarray(t) != 0) {
+ nilonly = "slice"
+ } else if t.Etype == TARRAY && Isfixedarray(t) != 0 && algtype1(t, nil) == ANOEQ {
+ Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
+ } else if t.Etype == TSTRUCT && algtype1(t, &badtype) == ANOEQ {
+ Yyerror("cannot switch on %v (struct containing %v cannot be compared)", Nconv(n.Ntest, obj.FmtLong), Tconv(badtype, 0))
+ } else if t.Etype == TFUNC {
+ nilonly = "func"
+ } else if t.Etype == TMAP {
+ nilonly = "map"
+ }
+ }
+ }
+
+ n.Type = t
+
+ def = nil
+ for l = n.List; l != nil; l = l.Next {
+ ncase = l.N
+ setlineno(n)
+ if ncase.List == nil {
+ // default
+ if def != nil {
+ Yyerror("multiple defaults in switch (first at %v)", def.Line())
+ } else {
+ def = ncase
+ }
+ } else {
+ for ll = ncase.List; ll != nil; ll = ll.Next {
+ setlineno(ll.N)
+ typecheck(&ll.N, Erv|Etype)
+ if ll.N.Type == nil || t == nil {
+ continue
+ }
+ setlineno(ncase)
+ switch top {
+ case Erv: // expression switch
+ defaultlit(&ll.N, t)
+
+ if ll.N.Op == OTYPE {
+ Yyerror("type %v is not an expression", Tconv(ll.N.Type, 0))
+ } else if ll.N.Type != nil && !(assignop(ll.N.Type, t, nil) != 0) && !(assignop(t, ll.N.Type, nil) != 0) {
+ if n.Ntest != nil {
+ Yyerror("invalid case %v in switch on %v (mismatched types %v and %v)", Nconv(ll.N, 0), Nconv(n.Ntest, 0), Tconv(ll.N.Type, 0), Tconv(t, 0))
+ } else {
+ Yyerror("invalid case %v in switch (mismatched types %v and bool)", Nconv(ll.N, 0), Tconv(ll.N.Type, 0))
+ }
+ } else if nilonly != "" && !(Isconst(ll.N, CTNIL) != 0) {
+ Yyerror("invalid case %v in switch (can only compare %s %v to nil)", Nconv(ll.N, 0), nilonly, Nconv(n.Ntest, 0))
+ }
+
+ case Etype: // type switch
+ if ll.N.Op == OLITERAL && Istype(ll.N.Type, TNIL) != 0 {
+ } else if ll.N.Op != OTYPE && ll.N.Type != nil { // should this be ||?
+ Yyerror("%v is not a type", Nconv(ll.N, obj.FmtLong))
+
+ // reset to original type
+ ll.N = n.Ntest.Right
+ } else if ll.N.Type.Etype != TINTER && t.Etype == TINTER && !(implements(ll.N.Type, t, &missing, &have, &ptr) != 0) {
+ if have != nil && !(missing.Broke != 0) && !(have.Broke != 0) {
+ Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (wrong type for %v method)\n\thave %v%v\n\twant %v%v", Nconv(n.Ntest.Right, obj.FmtLong), Tconv(ll.N.Type, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort))
+ } else if !(missing.Broke != 0) {
+ Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (missing %v method)", Nconv(n.Ntest.Right, obj.FmtLong), Tconv(ll.N.Type, 0), Sconv(missing.Sym, 0))
+ }
+ }
+ }
+ }
+ }
+
+ if top == Etype && n.Type != nil {
+ ll = ncase.List
+ nvar = ncase.Nname
+ if nvar != nil {
+ if ll != nil && ll.Next == nil && ll.N.Type != nil && !(Istype(ll.N.Type, TNIL) != 0) {
+ // single entry type switch
+ nvar.Ntype = typenod(ll.N.Type)
+ } else {
+ // multiple entry type switch or default
+ nvar.Ntype = typenod(n.Type)
+ }
+
+ typecheck(&nvar, Erv|Easgn)
+ ncase.Nname = nvar
+ }
+ }
+
+ typechecklist(ncase.Nbody, Etop)
+ }
+
+ lineno = int32(lno)
+}
diff --git a/src/cmd/internal/gc/typecheck.go b/src/cmd/internal/gc/typecheck.go
new file mode 100644
index 0000000000..9bba6e72ae
--- /dev/null
+++ b/src/cmd/internal/gc/typecheck.go
@@ -0,0 +1,4076 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "math"
+ "strings"
+)
+
+/*
+ * type check the whole tree of an expression.
+ * calculates expression types.
+ * evaluates compile time constants.
+ * marks variables that escape the local frame.
+ * rewrites n->op to be more specific in some cases.
+ */
+var typecheckdefstack *NodeList
+
+/*
+ * resolve ONONAME to definition, if any.
+ */
+func resolve(n *Node) *Node {
+ var r *Node
+
+ if n != nil && n.Op == ONONAME && n.Sym != nil {
+ r = n.Sym.Def
+ if r != nil {
+ if r.Op != OIOTA {
+ n = r
+ } else if n.Iota >= 0 {
+ n = Nodintconst(int64(n.Iota))
+ }
+ }
+ }
+
+ return n
+}
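+
+// For example (illustrative), in
+//
+//	const (
+//		a = iota
+//		b
+//	)
+//
+// each use of iota is left by the parser as an ONONAME whose
+// definition is an OIOTA node; resolve replaces it with Nodintconst
+// of the enclosing declaration's iota value: 0 for a, 1 for b.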
+
+func typechecklist(l *NodeList, top int) {
+ for ; l != nil; l = l.Next {
+ typecheck(&l.N, top)
+ }
+}
+
+var _typekind = []string{
+ TINT: "int",
+ TUINT: "uint",
+ TINT8: "int8",
+ TUINT8: "uint8",
+ TINT16: "int16",
+ TUINT16: "uint16",
+ TINT32: "int32",
+ TUINT32: "uint32",
+ TINT64: "int64",
+ TUINT64: "uint64",
+ TUINTPTR: "uintptr",
+ TCOMPLEX64: "complex64",
+ TCOMPLEX128: "complex128",
+ TFLOAT32: "float32",
+ TFLOAT64: "float64",
+ TBOOL: "bool",
+ TSTRING: "string",
+ TPTR32: "pointer",
+ TPTR64: "pointer",
+ TUNSAFEPTR: "unsafe.Pointer",
+ TSTRUCT: "struct",
+ TINTER: "interface",
+ TCHAN: "chan",
+ TMAP: "map",
+ TARRAY: "array",
+ TFUNC: "func",
+ TNIL: "nil",
+ TIDEAL: "untyped number",
+}
+
+var typekind_buf string
+
+func typekind(t *Type) string {
+ var et int
+ var s string
+
+ if Isslice(t) != 0 {
+ return "slice"
+ }
+ et = int(t.Etype)
+ if 0 <= et && et < len(_typekind) {
+ s = _typekind[et]
+ if s != "" {
+ return s
+ }
+ }
+ typekind_buf = fmt.Sprintf("etype=%d", et)
+ return typekind_buf
+}
+
+/*
+ * sprint_depchain prints a dependency chain
+ * of nodes into fmt_.
+ * It is used by typecheck in the case of OLITERAL nodes
+ * to print constant definition loops.
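+ *
+ * For example (hypothetical source), a cycle such as
+ *	const a = b
+ *	const b = a
+ * is reported as "constant definition loop" followed by lines
+ * of the form "file:line: a uses b".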
+ */
+func sprint_depchain(fmt_ *string, stack *NodeList, cur *Node, first *Node) {
+ var l *NodeList
+
+ for l = stack; l != nil; l = l.Next {
+ if l.N.Op == cur.Op {
+ if l.N != first {
+ sprint_depchain(fmt_, l.Next, l.N, first)
+ }
+ *fmt_ += fmt.Sprintf("\n\t%v: %v uses %v", l.N.Line(), Nconv(l.N, 0), Nconv(cur, 0))
+ return
+ }
+ }
+}
+
+/*
+ * type check node *np.
+ * replaces *np with a new pointer in some cases.
+ * returns the final value of *np as a convenience.
+ */
+
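+// typecheck_tcstack records the nodes whose typechecking is in
+// progress, for cycle diagnostics; typecheck_tcfree recycles
+// spent NodeList entries between calls.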
+var typecheck_tcstack *NodeList
+var typecheck_tcfree *NodeList
+
+func typecheck(np **Node, top int) *Node {
+ var n *Node
+ var lno int
+ var fmt_ string
+ var l *NodeList
+
+ // cannot type check until all the source has been parsed
+ if !(typecheckok != 0) {
+ Fatal("early typecheck")
+ }
+
+ n = *np
+ if n == nil {
+ return nil
+ }
+
+ lno = int(setlineno(n))
+
+ // Skip over parens.
+ for n.Op == OPAREN {
+ n = n.Left
+ }
+
+ // Resolve definition of name and value of iota lazily.
+ n = resolve(n)
+
+ *np = n
+
+ // Skip typecheck if already done.
+ // But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
+ if n.Typecheck == 1 {
+ switch n.Op {
+ case ONAME,
+ OTYPE,
+ OLITERAL,
+ OPACK:
+ break
+
+ default:
+ lineno = int32(lno)
+ return n
+ }
+ }
+
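+	// Typecheck == 2 marks a node whose typechecking is still in
+	// progress, so reaching it again means a dependency cycle.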
+ if n.Typecheck == 2 {
+		// Typechecking loop. Try printing a meaningful message,
+		// otherwise print a stack trace of typechecking.
+ switch n.Op {
+ // We can already diagnose variables used as types.
+ case ONAME:
+ if top&(Erv|Etype) == Etype {
+ Yyerror("%v is not a type", Nconv(n, 0))
+ }
+
+ case OLITERAL:
+ if top&(Erv|Etype) == Etype {
+ Yyerror("%v is not a type", Nconv(n, 0))
+ break
+ }
+
+ fmt_ = ""
+ sprint_depchain(&fmt_, typecheck_tcstack, n, n)
+ yyerrorl(int(n.Lineno), "constant definition loop%s", fmt_)
+ }
+
+ if nsavederrors+nerrors == 0 {
+ fmt_ = ""
+ for l = typecheck_tcstack; l != nil; l = l.Next {
+ fmt_ += fmt.Sprintf("\n\t%v %v", l.N.Line(), Nconv(l.N, 0))
+ }
+ Yyerror("typechecking loop involving %v%s", Nconv(n, 0), fmt_)
+ }
+
+ lineno = int32(lno)
+ return n
+ }
+
+ n.Typecheck = 2
+
+ if typecheck_tcfree != nil {
+ l = typecheck_tcfree
+ typecheck_tcfree = l.Next
+ } else {
+ l = new(NodeList)
+ }
+ l.Next = typecheck_tcstack
+ l.N = n
+ typecheck_tcstack = l
+
+ typecheck1(&n, top)
+ *np = n
+ n.Typecheck = 1
+
+ if typecheck_tcstack != l {
+ Fatal("typecheck stack out of sync")
+ }
+ typecheck_tcstack = l.Next
+ l.Next = typecheck_tcfree
+ typecheck_tcfree = l
+
+ lineno = int32(lno)
+ return n
+}
+
+/*
+ * does n contain a call or receive operation?
+ */
+func callrecv(n *Node) int {
+ if n == nil {
+ return 0
+ }
+
+ switch n.Op {
+ case OCALL,
+ OCALLMETH,
+ OCALLINTER,
+ OCALLFUNC,
+ ORECV,
+ OCAP,
+ OLEN,
+ OCOPY,
+ ONEW,
+ OAPPEND,
+ ODELETE:
+ return 1
+ }
+
+ return bool2int(callrecv(n.Left) != 0 || callrecv(n.Right) != 0 || callrecv(n.Ntest) != 0 || callrecv(n.Nincr) != 0 || callrecvlist(n.Ninit) != 0 || callrecvlist(n.Nbody) != 0 || callrecvlist(n.Nelse) != 0 || callrecvlist(n.List) != 0 || callrecvlist(n.Rlist) != 0)
+}
+
+func callrecvlist(l *NodeList) int {
+ for ; l != nil; l = l.Next {
+ if callrecv(l.N) != 0 {
+ return 1
+ }
+ }
+ return 0
+}
+
+// indexlit implements typechecking of untyped values as
+// array/slice indexes. It is equivalent to defaultlit
+// except for constants of numerical kind, which are acceptable
+// whenever they can be represented by a value of type int.
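+//
+// For example (illustrative), in s[2.0] the untyped constant 2.0
+// is defaulted to type int, since it is representable as an int.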
+func indexlit(np **Node) {
+ var n *Node
+
+ n = *np
+ if n == nil || !(isideal(n.Type) != 0) {
+ return
+ }
+ switch consttype(n) {
+ case CTINT,
+ CTRUNE,
+ CTFLT,
+ CTCPLX:
+ defaultlit(np, Types[TINT])
+ }
+
+ defaultlit(np, nil)
+}
+
+func typecheck1(np **Node, top int) {
+ var et int
+ var aop int
+ var op int
+ var ptr int
+ var n *Node
+ var l *Node
+ var r *Node
+ var lo *Node
+ var mid *Node
+ var hi *Node
+ var args *NodeList
+ var ok int
+ var ntop int
+ var t *Type
+ var tp *Type
+ var missing *Type
+ var have *Type
+ var badtype *Type
+ var v Val
+ var why string
+ var desc string
+ var descbuf string
+ var x int64
+
+ n = *np
+
+ if n.Sym != nil {
+ if n.Op == ONAME && n.Etype != 0 && !(top&Ecall != 0) {
+ Yyerror("use of builtin %v not in function call", Sconv(n.Sym, 0))
+ goto error
+ }
+
+ typecheckdef(n)
+ if n.Op == ONONAME {
+ goto error
+ }
+ }
+
+ *np = n
+
+reswitch:
+ ok = 0
+ switch n.Op {
+ // until typecheck is complete, do nothing.
+ default:
+ Dump("typecheck", n)
+
+ Fatal("typecheck %v", Oconv(int(n.Op), 0))
+ fallthrough
+
+ /*
+ * names
+ */
+ case OLITERAL:
+ ok |= Erv
+
+ if n.Type == nil && n.Val.Ctype == CTSTR {
+ n.Type = idealstring
+ }
+ goto ret
+
+ case ONONAME:
+ ok |= Erv
+ goto ret
+
+ case ONAME:
+ if n.Decldepth == 0 {
+ n.Decldepth = decldepth
+ }
+ if n.Etype != 0 {
+ ok |= Ecall
+ goto ret
+ }
+
+ if !(top&Easgn != 0) {
+ // not a write to the variable
+ if isblank(n) {
+ Yyerror("cannot use _ as value")
+ goto error
+ }
+
+ n.Used = 1
+ }
+
+ if !(top&Ecall != 0) && isunsafebuiltin(n) != 0 {
+ Yyerror("%v is not an expression, must be called", Nconv(n, 0))
+ goto error
+ }
+
+ ok |= Erv
+ goto ret
+
+ case OPACK:
+ Yyerror("use of package %v without selector", Sconv(n.Sym, 0))
+ goto error
+
+ case ODDD:
+ break
+
+ /*
+ * types (OIND is with exprs)
+ */
+ case OTYPE:
+ ok |= Etype
+
+ if n.Type == nil {
+ goto error
+ }
+
+ case OTARRAY:
+ ok |= Etype
+ t = typ(TARRAY)
+ l = n.Left
+ r = n.Right
+ if l == nil {
+ t.Bound = -1 // slice
+ } else if l.Op == ODDD {
+ t.Bound = -100 // to be filled in
+ if !(top&Ecomplit != 0) && !(n.Diag != 0) {
+ t.Broke = 1
+ n.Diag = 1
+ Yyerror("use of [...] array outside of array literal")
+ }
+ } else {
+ l = typecheck(&n.Left, Erv)
+ switch consttype(l) {
+ case CTINT,
+ CTRUNE:
+ v = l.Val
+
+ case CTFLT:
+ v = toint(l.Val)
+
+ default:
+ if l.Type != nil && Isint[l.Type.Etype] != 0 && l.Op != OLITERAL {
+ Yyerror("non-constant array bound %v", Nconv(l, 0))
+ } else {
+ Yyerror("invalid array bound %v", Nconv(l, 0))
+ }
+ goto error
+ }
+
+ t.Bound = Mpgetfix(v.U.Xval)
+ if doesoverflow(v, Types[TINT]) != 0 {
+ Yyerror("array bound is too large")
+ goto error
+ } else if t.Bound < 0 {
+ Yyerror("array bound must be non-negative")
+ goto error
+ }
+ }
+
+ typecheck(&r, Etype)
+ if r.Type == nil {
+ goto error
+ }
+ t.Type = r.Type
+ n.Op = OTYPE
+ n.Type = t
+ n.Left = nil
+ n.Right = nil
+ if t.Bound != -100 {
+ checkwidth(t)
+ }
+
+ case OTMAP:
+ ok |= Etype
+ l = typecheck(&n.Left, Etype)
+ r = typecheck(&n.Right, Etype)
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ n.Op = OTYPE
+ n.Type = maptype(l.Type, r.Type)
+ n.Left = nil
+ n.Right = nil
+
+ case OTCHAN:
+ ok |= Etype
+ l = typecheck(&n.Left, Etype)
+ if l.Type == nil {
+ goto error
+ }
+ t = typ(TCHAN)
+ t.Type = l.Type
+ t.Chan = n.Etype
+ n.Op = OTYPE
+ n.Type = t
+ n.Left = nil
+ n.Etype = 0
+
+ case OTSTRUCT:
+ ok |= Etype
+ n.Op = OTYPE
+ n.Type = tostruct(n.List)
+ if n.Type == nil || n.Type.Broke != 0 {
+ goto error
+ }
+ n.List = nil
+
+ case OTINTER:
+ ok |= Etype
+ n.Op = OTYPE
+ n.Type = tointerface(n.List)
+ if n.Type == nil {
+ goto error
+ }
+
+ case OTFUNC:
+ ok |= Etype
+ n.Op = OTYPE
+ n.Type = functype(n.Left, n.List, n.Rlist)
+ if n.Type == nil {
+ goto error
+ }
+
+ /*
+ * type or expr
+ */
+ case OIND:
+ ntop = Erv | Etype
+
+ if !(top&Eaddr != 0) { // The *x in &*x is not an indirect.
+ ntop |= Eindir
+ }
+ ntop |= top & Ecomplit
+ l = typecheck(&n.Left, ntop)
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if l.Op == OTYPE {
+ ok |= Etype
+ n.Op = OTYPE
+ n.Type = Ptrto(l.Type)
+ n.Left = nil
+ goto ret
+ }
+
+ if !(Isptr[t.Etype] != 0) {
+ if top&(Erv|Etop) != 0 {
+ Yyerror("invalid indirect of %v", Nconv(n.Left, obj.FmtLong))
+ goto error
+ }
+
+ goto ret
+ }
+
+ ok |= Erv
+ n.Type = t.Type
+ goto ret
+
+ /*
+ * arithmetic exprs
+ */
+ case OASOP:
+ ok |= Etop
+
+ l = typecheck(&n.Left, Erv)
+ r = typecheck(&n.Right, Erv)
+ checkassign(n, n.Left)
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ op = int(n.Etype)
+ goto arith
+
+ case OADD,
+ OAND,
+ OANDAND,
+ OANDNOT,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLT,
+ OLSH,
+ ORSH,
+ OMOD,
+ OMUL,
+ ONE,
+ OOR,
+ OOROR,
+ OSUB,
+ OXOR:
+ ok |= Erv
+ l = typecheck(&n.Left, Erv|top&Eiota)
+ r = typecheck(&n.Right, Erv|top&Eiota)
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ op = int(n.Op)
+ goto arith
+
+ case OCOM,
+ OMINUS,
+ ONOT,
+ OPLUS:
+ ok |= Erv
+ l = typecheck(&n.Left, Erv|top&Eiota)
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if !(okfor[n.Op][t.Etype] != 0) {
+ Yyerror("invalid operation: %v %v", Oconv(int(n.Op), 0), Tconv(t, 0))
+ goto error
+ }
+
+ n.Type = t
+ goto ret
+
+ /*
+ * exprs
+ */
+ case OADDR:
+ ok |= Erv
+
+ typecheck(&n.Left, Erv|Eaddr)
+ if n.Left.Type == nil {
+ goto error
+ }
+ checklvalue(n.Left, "take the address of")
+ r = outervalue(n.Left)
+ for l = n.Left; l != r; l = l.Left {
+ l.Addrtaken = 1
+ if l.Closure != nil {
+ l.Closure.Addrtaken = 1
+ }
+ }
+
+ if l.Orig != l && l.Op == ONAME {
+ Fatal("found non-orig name node %v", Nconv(l, 0))
+ }
+ l.Addrtaken = 1
+ if l.Closure != nil {
+ l.Closure.Addrtaken = 1
+ }
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ n.Type = Ptrto(t)
+ goto ret
+
+ case OCOMPLIT:
+ ok |= Erv
+ typecheckcomplit(&n)
+ if n.Type == nil {
+ goto error
+ }
+ goto ret
+
+ case OXDOT:
+ n = adddot(n)
+ n.Op = ODOT
+ if n.Left == nil {
+ goto error
+ }
+ fallthrough
+
+ // fall through
+ case ODOT:
+ typecheck(&n.Left, Erv|Etype)
+
+ defaultlit(&n.Left, nil)
+ if n.Right.Op != ONAME {
+ Yyerror("rhs of . must be a name") // impossible
+ goto error
+ }
+
+ t = n.Left.Type
+ if t == nil {
+ adderrorname(n)
+ goto error
+ }
+
+ r = n.Right
+
+ if n.Left.Op == OTYPE {
+ if !(looktypedot(n, t, 0) != 0) {
+ if looktypedot(n, t, 1) != 0 {
+ Yyerror("%v undefined (cannot refer to unexported method %v)", Nconv(n, 0), Sconv(n.Right.Sym, 0))
+ } else {
+ Yyerror("%v undefined (type %v has no method %v)", Nconv(n, 0), Tconv(t, 0), Sconv(n.Right.Sym, 0))
+ }
+ goto error
+ }
+
+ if n.Type.Etype != TFUNC || n.Type.Thistuple != 1 {
+ Yyerror("type %v has no method %v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort))
+ n.Type = nil
+ goto error
+ }
+
+ n.Op = ONAME
+ n.Sym = n.Right.Sym
+ n.Type = methodfunc(n.Type, n.Left.Type)
+ n.Xoffset = 0
+ n.Class = PFUNC
+ ok = Erv
+ goto ret
+ }
+
+ if Isptr[t.Etype] != 0 && t.Type.Etype != TINTER {
+ t = t.Type
+ if t == nil {
+ goto error
+ }
+ n.Op = ODOTPTR
+ checkwidth(t)
+ }
+
+ if isblank(n.Right) {
+ Yyerror("cannot refer to blank field or method")
+ goto error
+ }
+
+ if !(lookdot(n, t, 0) != 0) {
+ if lookdot(n, t, 1) != 0 {
+ Yyerror("%v undefined (cannot refer to unexported field or method %v)", Nconv(n, 0), Sconv(n.Right.Sym, 0))
+ } else {
+ Yyerror("%v undefined (type %v has no field or method %v)", Nconv(n, 0), Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, 0))
+ }
+ goto error
+ }
+
+ switch n.Op {
+ case ODOTINTER,
+ ODOTMETH:
+ if top&Ecall != 0 {
+ ok |= Ecall
+ } else {
+ typecheckpartialcall(n, r)
+ ok |= Erv
+ }
+
+ default:
+ ok |= Erv
+ }
+
+ goto ret
+
+ case ODOTTYPE:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if !(Isinter(t) != 0) {
+ Yyerror("invalid type assertion: %v (non-interface type %v on left)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ if n.Right != nil {
+ typecheck(&n.Right, Etype)
+ n.Type = n.Right.Type
+ n.Right = nil
+ if n.Type == nil {
+ goto error
+ }
+ }
+
+ if n.Type != nil && n.Type.Etype != TINTER {
+ if !(implements(n.Type, t, &missing, &have, &ptr) != 0) {
+ if have != nil && have.Sym == missing.Sym {
+ Yyerror("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(n.Type, 0), Tconv(t, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+ } else if ptr != 0 {
+ Yyerror("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", Tconv(n.Type, 0), Tconv(t, 0), Sconv(missing.Sym, 0))
+ } else if have != nil {
+ Yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(n.Type, 0), Tconv(t, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+ } else {
+ Yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)", Tconv(n.Type, 0), Tconv(t, 0), Sconv(missing.Sym, 0))
+ }
+ goto error
+ }
+ }
+
+ goto ret
+
+ case OINDEX:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, nil)
+ implicitstar(&n.Left)
+ l = n.Left
+ typecheck(&n.Right, Erv)
+ r = n.Right
+ t = l.Type
+ if t == nil || r.Type == nil {
+ goto error
+ }
+ switch t.Etype {
+ default:
+ Yyerror("invalid operation: %v (type %v does not support indexing)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+
+ case TSTRING,
+ TARRAY:
+ indexlit(&n.Right)
+ if t.Etype == TSTRING {
+ n.Type = Types[TUINT8]
+ } else {
+ n.Type = t.Type
+ }
+ why = "string"
+ if t.Etype == TARRAY {
+ if Isfixedarray(t) != 0 {
+ why = "array"
+ } else {
+ why = "slice"
+ }
+ }
+
+ if n.Right.Type != nil && !(Isint[n.Right.Type.Etype] != 0) {
+ Yyerror("non-integer %s index %v", why, Nconv(n.Right, 0))
+ break
+ }
+
+ if Isconst(n.Right, CTINT) != 0 {
+ x = Mpgetfix(n.Right.Val.U.Xval)
+ if x < 0 {
+ Yyerror("invalid %s index %v (index must be non-negative)", why, Nconv(n.Right, 0))
+ } else if Isfixedarray(t) != 0 && t.Bound > 0 && x >= t.Bound {
+ Yyerror("invalid array index %v (out of bounds for %d-element array)", Nconv(n.Right, 0), t.Bound)
+ } else if Isconst(n.Left, CTSTR) != 0 && x >= int64(len(n.Left.Val.U.Sval.S)) {
+ Yyerror("invalid string index %v (out of bounds for %d-byte string)", Nconv(n.Right, 0), len(n.Left.Val.U.Sval.S))
+ } else if Mpcmpfixfix(n.Right.Val.U.Xval, Maxintval[TINT]) > 0 {
+ Yyerror("invalid %s index %v (index too large)", why, Nconv(n.Right, 0))
+ }
+ }
+
+ case TMAP:
+ n.Etype = 0
+ defaultlit(&n.Right, t.Down)
+ if n.Right.Type != nil {
+ n.Right = assignconv(n.Right, t.Down, "map index")
+ }
+ n.Type = t.Type
+ n.Op = OINDEXMAP
+ }
+
+ goto ret
+
+ case ORECV:
+ ok |= Etop | Erv
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if t.Etype != TCHAN {
+ Yyerror("invalid operation: %v (receive from non-chan type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ if !(t.Chan&Crecv != 0) {
+ Yyerror("invalid operation: %v (receive from send-only type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ n.Type = t.Type
+ goto ret
+
+ case OSEND:
+ ok |= Etop
+ l = typecheck(&n.Left, Erv)
+ typecheck(&n.Right, Erv)
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if t.Etype != TCHAN {
+ Yyerror("invalid operation: %v (send to non-chan type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ if !(t.Chan&Csend != 0) {
+ Yyerror("invalid operation: %v (send to receive-only type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ defaultlit(&n.Right, t.Type)
+ r = n.Right
+ if r.Type == nil {
+ goto error
+ }
+ n.Right = assignconv(r, l.Type.Type, "send")
+
+ // TODO: more aggressive
+ n.Etype = 0
+
+ n.Type = nil
+ goto ret
+
+ case OSLICE:
+ ok |= Erv
+ typecheck(&n.Left, top)
+ typecheck(&n.Right.Left, Erv)
+ typecheck(&n.Right.Right, Erv)
+ defaultlit(&n.Left, nil)
+ indexlit(&n.Right.Left)
+ indexlit(&n.Right.Right)
+ l = n.Left
+ if Isfixedarray(l.Type) != 0 {
+ if !(islvalue(n.Left) != 0) {
+ Yyerror("invalid operation %v (slice of unaddressable value)", Nconv(n, 0))
+ goto error
+ }
+
+ n.Left = Nod(OADDR, n.Left, nil)
+ n.Left.Implicit = 1
+ typecheck(&n.Left, Erv)
+ l = n.Left
+ }
+
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ tp = nil
+ if Istype(t, TSTRING) != 0 {
+ n.Type = t
+ n.Op = OSLICESTR
+ } else if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) != 0 {
+ tp = t.Type
+ n.Type = typ(TARRAY)
+ n.Type.Type = tp.Type
+ n.Type.Bound = -1
+ dowidth(n.Type)
+ n.Op = OSLICEARR
+ } else if Isslice(t) != 0 {
+ n.Type = t
+ } else {
+ Yyerror("cannot slice %v (type %v)", Nconv(l, 0), Tconv(t, 0))
+ goto error
+ }
+
+ lo = n.Right.Left
+ if lo != nil && checksliceindex(l, lo, tp) < 0 {
+ goto error
+ }
+ hi = n.Right.Right
+ if hi != nil && checksliceindex(l, hi, tp) < 0 {
+ goto error
+ }
+ if checksliceconst(lo, hi) < 0 {
+ goto error
+ }
+ goto ret
+
+ case OSLICE3:
+ ok |= Erv
+ typecheck(&n.Left, top)
+ typecheck(&n.Right.Left, Erv)
+ typecheck(&n.Right.Right.Left, Erv)
+ typecheck(&n.Right.Right.Right, Erv)
+ defaultlit(&n.Left, nil)
+ indexlit(&n.Right.Left)
+ indexlit(&n.Right.Right.Left)
+ indexlit(&n.Right.Right.Right)
+ l = n.Left
+ if Isfixedarray(l.Type) != 0 {
+ if !(islvalue(n.Left) != 0) {
+ Yyerror("invalid operation %v (slice of unaddressable value)", Nconv(n, 0))
+ goto error
+ }
+
+ n.Left = Nod(OADDR, n.Left, nil)
+ n.Left.Implicit = 1
+ typecheck(&n.Left, Erv)
+ l = n.Left
+ }
+
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ tp = nil
+ if Istype(t, TSTRING) != 0 {
+ Yyerror("invalid operation %v (3-index slice of string)", Nconv(n, 0))
+ goto error
+ }
+
+ if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) != 0 {
+ tp = t.Type
+ n.Type = typ(TARRAY)
+ n.Type.Type = tp.Type
+ n.Type.Bound = -1
+ dowidth(n.Type)
+ n.Op = OSLICE3ARR
+ } else if Isslice(t) != 0 {
+ n.Type = t
+ } else {
+ Yyerror("cannot slice %v (type %v)", Nconv(l, 0), Tconv(t, 0))
+ goto error
+ }
+
+ lo = n.Right.Left
+ if lo != nil && checksliceindex(l, lo, tp) < 0 {
+ goto error
+ }
+ mid = n.Right.Right.Left
+ if mid != nil && checksliceindex(l, mid, tp) < 0 {
+ goto error
+ }
+ hi = n.Right.Right.Right
+ if hi != nil && checksliceindex(l, hi, tp) < 0 {
+ goto error
+ }
+ if checksliceconst(lo, hi) < 0 || checksliceconst(lo, mid) < 0 || checksliceconst(mid, hi) < 0 {
+ goto error
+ }
+ goto ret
+
+ /*
+ * call and call like
+ */
+ case OCALL:
+ l = n.Left
+
+ if l.Op == ONAME {
+ r = unsafenmagic(n)
+ if r != nil {
+ if n.Isddd != 0 {
+ Yyerror("invalid use of ... with builtin %v", Nconv(l, 0))
+ }
+ n = r
+ goto reswitch
+ }
+ }
+
+ typecheck(&n.Left, Erv|Etype|Ecall|top&Eproc)
+ n.Diag |= n.Left.Diag
+ l = n.Left
+ if l.Op == ONAME && l.Etype != 0 {
+ if n.Isddd != 0 && l.Etype != OAPPEND {
+ Yyerror("invalid use of ... with builtin %v", Nconv(l, 0))
+ }
+
+ // builtin: OLEN, OCAP, etc.
+ n.Op = l.Etype
+
+ n.Left = n.Right
+ n.Right = nil
+ goto reswitch
+ }
+
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ if l.Op == OTYPE {
+ if n.Isddd != 0 || l.Type.Bound == -100 {
+ if !(l.Type.Broke != 0) {
+ Yyerror("invalid use of ... in type conversion", l)
+ }
+ n.Diag = 1
+ }
+
+ // pick off before type-checking arguments
+ ok |= Erv
+
+ // turn CALL(type, arg) into CONV(arg) w/ type
+ n.Left = nil
+
+ n.Op = OCONV
+ n.Type = l.Type
+ if onearg(n, "conversion to %v", Tconv(l.Type, 0)) < 0 {
+ goto error
+ }
+ goto doconv
+ }
+
+ if count(n.List) == 1 && !(n.Isddd != 0) {
+ typecheck(&n.List.N, Erv|Efnstruct)
+ } else {
+ typechecklist(n.List, Erv)
+ }
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ checkwidth(t)
+
+ switch l.Op {
+ case ODOTINTER:
+ n.Op = OCALLINTER
+
+ case ODOTMETH:
+ n.Op = OCALLMETH
+
+ // typecheckaste was used here but there wasn't enough
+ // information further down the call chain to know if we
+ // were testing a method receiver for unexported fields.
+ // It isn't necessary, so just do a sanity check.
+ tp = getthisx(t).Type.Type
+
+ if l.Left == nil || !Eqtype(l.Left.Type, tp) {
+ Fatal("method receiver")
+ }
+
+ default:
+ n.Op = OCALLFUNC
+ if t.Etype != TFUNC {
+ Yyerror("cannot call non-function %v (type %v)", Nconv(l, 0), Tconv(t, 0))
+ goto error
+ }
+ }
+
+ descbuf = fmt.Sprintf("argument to %v", Nconv(n.Left, 0))
+ desc = descbuf
+ typecheckaste(OCALL, n.Left, int(n.Isddd), getinargx(t), n.List, desc)
+ ok |= Etop
+ if t.Outtuple == 0 {
+ goto ret
+ }
+ ok |= Erv
+ if t.Outtuple == 1 {
+ t = getoutargx(l.Type).Type
+ if t == nil {
+ goto error
+ }
+ if t.Etype == TFIELD {
+ t = t.Type
+ }
+ n.Type = t
+ goto ret
+ }
+
+ // multiple return
+ if !(top&(Efnstruct|Etop) != 0) {
+ Yyerror("multiple-value %v() in single-value context", Nconv(l, 0))
+ goto ret
+ }
+
+ n.Type = getoutargx(l.Type)
+ goto ret
+
+ case OCAP,
+ OLEN,
+ OREAL,
+ OIMAG:
+ ok |= Erv
+ if onearg(n, "%v", Oconv(int(n.Op), 0)) < 0 {
+ goto error
+ }
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, nil)
+ implicitstar(&n.Left)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ switch n.Op {
+ case OCAP:
+ if !(okforcap[t.Etype] != 0) {
+ goto badcall1
+ }
+
+ case OLEN:
+ if !(okforlen[t.Etype] != 0) {
+ goto badcall1
+ }
+
+ case OREAL,
+ OIMAG:
+ if !(Iscomplex[t.Etype] != 0) {
+ goto badcall1
+ }
+ if Isconst(l, CTCPLX) != 0 {
+ r = n
+ if n.Op == OREAL {
+ n = nodfltconst(&l.Val.U.Cval.Real)
+ } else {
+ n = nodfltconst(&l.Val.U.Cval.Imag)
+ }
+ n.Orig = r
+ }
+
+ n.Type = Types[cplxsubtype(int(t.Etype))]
+ goto ret
+ }
+
+ // might be constant
+ switch t.Etype {
+ case TSTRING:
+ if Isconst(l, CTSTR) != 0 {
+ r = Nod(OXXX, nil, nil)
+ Nodconst(r, Types[TINT], int64(len(l.Val.U.Sval.S)))
+ r.Orig = n
+ n = r
+ }
+
+ case TARRAY:
+ if t.Bound < 0 { // slice
+ break
+ }
+ if callrecv(l) != 0 { // has call or receive
+ break
+ }
+ r = Nod(OXXX, nil, nil)
+ Nodconst(r, Types[TINT], t.Bound)
+ r.Orig = n
+ n = r
+ }
+
+ n.Type = Types[TINT]
+ goto ret
+
+ case OCOMPLEX:
+ ok |= Erv
+ if count(n.List) == 1 {
+ typechecklist(n.List, Efnstruct)
+ if n.List.N.Op != OCALLFUNC && n.List.N.Op != OCALLMETH {
+ Yyerror("invalid operation: complex expects two arguments")
+ goto error
+ }
+
+ t = n.List.N.Left.Type
+ if t.Outtuple != 2 {
+ Yyerror("invalid operation: complex expects two arguments, %v returns %d results", Nconv(n.List.N, 0), t.Outtuple)
+ goto error
+ }
+
+ t = n.List.N.Type.Type
+ l = t.Nname
+ r = t.Down.Nname
+ } else {
+ if twoarg(n) < 0 {
+ goto error
+ }
+ l = typecheck(&n.Left, Erv|top&Eiota)
+ r = typecheck(&n.Right, Erv|top&Eiota)
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ defaultlit2(&l, &r, 0)
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ n.Left = l
+ n.Right = r
+ }
+
+ if !Eqtype(l.Type, r.Type) {
+ Yyerror("invalid operation: %v (mismatched types %v and %v)", Nconv(n, 0), Tconv(l.Type, 0), Tconv(r.Type, 0))
+ goto error
+ }
+
+ switch l.Type.Etype {
+ default:
+ Yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", Nconv(n, 0), Tconv(l.Type, 0), r.Type)
+ goto error
+
+ case TIDEAL:
+ t = Types[TIDEAL]
+
+ case TFLOAT32:
+ t = Types[TCOMPLEX64]
+
+ case TFLOAT64:
+ t = Types[TCOMPLEX128]
+ }
+
+ if l.Op == OLITERAL && r.Op == OLITERAL {
+ // make it a complex literal
+ r = nodcplxlit(l.Val, r.Val)
+
+ r.Orig = n
+ n = r
+ }
+
+ n.Type = t
+ goto ret
+
+ case OCLOSE:
+ if onearg(n, "%v", Oconv(int(n.Op), 0)) < 0 {
+ goto error
+ }
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if t.Etype != TCHAN {
+ Yyerror("invalid operation: %v (non-chan type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ if !(t.Chan&Csend != 0) {
+ Yyerror("invalid operation: %v (cannot close receive-only channel)", Nconv(n, 0))
+ goto error
+ }
+
+ ok |= Etop
+ goto ret
+
+ case ODELETE:
+ args = n.List
+ if args == nil {
+ Yyerror("missing arguments to delete")
+ goto error
+ }
+
+ if args.Next == nil {
+ Yyerror("missing second (key) argument to delete")
+ goto error
+ }
+
+ if args.Next.Next != nil {
+ Yyerror("too many arguments to delete")
+ goto error
+ }
+
+ ok |= Etop
+ typechecklist(args, Erv)
+ l = args.N
+ r = args.Next.N
+ if l.Type != nil && l.Type.Etype != TMAP {
+ Yyerror("first argument to delete must be map; have %v", Tconv(l.Type, obj.FmtLong))
+ goto error
+ }
+
+ args.Next.N = assignconv(r, l.Type.Down, "delete")
+ goto ret
+
+ case OAPPEND:
+ ok |= Erv
+ args = n.List
+ if args == nil {
+ Yyerror("missing arguments to append")
+ goto error
+ }
+
+ if count(args) == 1 && !(n.Isddd != 0) {
+ typecheck(&args.N, Erv|Efnstruct)
+ } else {
+ typechecklist(args, Erv)
+ }
+
+ t = args.N.Type
+ if t == nil {
+ goto error
+ }
+
+ // Unpack multiple-return result before type-checking.
+ if Istype(t, TSTRUCT) != 0 && t.Funarg != 0 {
+ t = t.Type
+ if Istype(t, TFIELD) != 0 {
+ t = t.Type
+ }
+ }
+
+ n.Type = t
+ if !(Isslice(t) != 0) {
+ if Isconst(args.N, CTNIL) != 0 {
+ Yyerror("first argument to append must be typed slice; have untyped nil", t)
+ goto error
+ }
+
+ Yyerror("first argument to append must be slice; have %v", Tconv(t, obj.FmtLong))
+ goto error
+ }
+
+ if n.Isddd != 0 {
+ if args.Next == nil {
+ Yyerror("cannot use ... on first argument to append")
+ goto error
+ }
+
+ if args.Next.Next != nil {
+ Yyerror("too many arguments to append")
+ goto error
+ }
+
+ if Istype(t.Type, TUINT8) != 0 && Istype(args.Next.N.Type, TSTRING) != 0 {
+ defaultlit(&args.Next.N, Types[TSTRING])
+ goto ret
+ }
+
+ args.Next.N = assignconv(args.Next.N, t.Orig, "append")
+ goto ret
+ }
+
+ for args = args.Next; args != nil; args = args.Next {
+ if args.N.Type == nil {
+ continue
+ }
+ args.N = assignconv(args.N, t.Type, "append")
+ }
+
+ goto ret
+
+ case OCOPY:
+ ok |= Etop | Erv
+ args = n.List
+ if args == nil || args.Next == nil {
+ Yyerror("missing arguments to copy")
+ goto error
+ }
+
+ if args.Next.Next != nil {
+ Yyerror("too many arguments to copy")
+ goto error
+ }
+
+ n.Left = args.N
+ n.Right = args.Next.N
+ n.List = nil
+ n.Type = Types[TINT]
+ typecheck(&n.Left, Erv)
+ typecheck(&n.Right, Erv)
+ if n.Left.Type == nil || n.Right.Type == nil {
+ goto error
+ }
+ defaultlit(&n.Left, nil)
+ defaultlit(&n.Right, nil)
+ if n.Left.Type == nil || n.Right.Type == nil {
+ goto error
+ }
+
+ // copy([]byte, string)
+ if Isslice(n.Left.Type) != 0 && n.Right.Type.Etype == TSTRING {
+ if Eqtype(n.Left.Type.Type, bytetype) {
+ goto ret
+ }
+ Yyerror("arguments to copy have different element types: %v and string", Tconv(n.Left.Type, obj.FmtLong))
+ goto error
+ }
+
+ if !(Isslice(n.Left.Type) != 0) || !(Isslice(n.Right.Type) != 0) {
+ if !(Isslice(n.Left.Type) != 0) && !(Isslice(n.Right.Type) != 0) {
+ Yyerror("arguments to copy must be slices; have %v, %v", Tconv(n.Left.Type, obj.FmtLong), Tconv(n.Right.Type, obj.FmtLong))
+ } else if !(Isslice(n.Left.Type) != 0) {
+ Yyerror("first argument to copy should be slice; have %v", Tconv(n.Left.Type, obj.FmtLong))
+ } else {
+ Yyerror("second argument to copy should be slice or string; have %v", Tconv(n.Right.Type, obj.FmtLong))
+ }
+ goto error
+ }
+
+ if !Eqtype(n.Left.Type.Type, n.Right.Type.Type) {
+ Yyerror("arguments to copy have different element types: %v and %v", Tconv(n.Left.Type, obj.FmtLong), Tconv(n.Right.Type, obj.FmtLong))
+ goto error
+ }
+
+ goto ret
+
+ case OCONV:
+ goto doconv
+
+ case OMAKE:
+ ok |= Erv
+ args = n.List
+ if args == nil {
+ Yyerror("missing argument to make")
+ goto error
+ }
+
+ n.List = nil
+ l = args.N
+ args = args.Next
+ typecheck(&l, Etype)
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+
+ switch t.Etype {
+ default:
+ Yyerror("cannot make type %v", Tconv(t, 0))
+ goto error
+
+ case TARRAY:
+ if !(Isslice(t) != 0) {
+ Yyerror("cannot make type %v", Tconv(t, 0))
+ goto error
+ }
+
+ if args == nil {
+ Yyerror("missing len argument to make(%v)", Tconv(t, 0))
+ goto error
+ }
+
+ l = args.N
+ args = args.Next
+ typecheck(&l, Erv)
+ r = nil
+ if args != nil {
+ r = args.N
+ args = args.Next
+ typecheck(&r, Erv)
+ }
+
+ if l.Type == nil || (r != nil && r.Type == nil) {
+ goto error
+ }
+ et = bool2int(checkmake(t, "len", l) < 0)
+ et |= bool2int(r != nil && checkmake(t, "cap", r) < 0)
+ if et != 0 {
+ goto error
+ }
+ if Isconst(l, CTINT) != 0 && r != nil && Isconst(r, CTINT) != 0 && Mpcmpfixfix(l.Val.U.Xval, r.Val.U.Xval) > 0 {
+ Yyerror("len larger than cap in make(%v)", Tconv(t, 0))
+ goto error
+ }
+
+ n.Left = l
+ n.Right = r
+ n.Op = OMAKESLICE
+
+ case TMAP:
+ if args != nil {
+ l = args.N
+ args = args.Next
+ typecheck(&l, Erv)
+ defaultlit(&l, Types[TINT])
+ if l.Type == nil {
+ goto error
+ }
+ if checkmake(t, "size", l) < 0 {
+ goto error
+ }
+ n.Left = l
+ } else {
+ n.Left = Nodintconst(0)
+ }
+ n.Op = OMAKEMAP
+
+ case TCHAN:
+ l = nil
+ if args != nil {
+ l = args.N
+ args = args.Next
+ typecheck(&l, Erv)
+ defaultlit(&l, Types[TINT])
+ if l.Type == nil {
+ goto error
+ }
+ if checkmake(t, "buffer", l) < 0 {
+ goto error
+ }
+ n.Left = l
+ } else {
+ n.Left = Nodintconst(0)
+ }
+ n.Op = OMAKECHAN
+ }
+
+ if args != nil {
+ Yyerror("too many arguments to make(%v)", Tconv(t, 0))
+ n.Op = OMAKE
+ goto error
+ }
+
+ n.Type = t
+ goto ret
+
+ case ONEW:
+ ok |= Erv
+ args = n.List
+ if args == nil {
+ Yyerror("missing argument to new")
+ goto error
+ }
+
+ l = args.N
+ typecheck(&l, Etype)
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if args.Next != nil {
+ Yyerror("too many arguments to new(%v)", Tconv(t, 0))
+ goto error
+ }
+
+ n.Left = l
+ n.Type = Ptrto(t)
+ goto ret
+
+ case OPRINT,
+ OPRINTN:
+ ok |= Etop
+ typechecklist(n.List, Erv|Eindir) // Eindir: address does not escape
+ for args = n.List; args != nil; args = args.Next {
+ // Special case for print: int constant is int64, not int.
+ if Isconst(args.N, CTINT) != 0 {
+ defaultlit(&args.N, Types[TINT64])
+ } else {
+ defaultlit(&args.N, nil)
+ }
+ }
+
+ goto ret
+
+ case OPANIC:
+ ok |= Etop
+ if onearg(n, "panic") < 0 {
+ goto error
+ }
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, Types[TINTER])
+ if n.Left.Type == nil {
+ goto error
+ }
+ goto ret
+
+ case ORECOVER:
+ ok |= Erv | Etop
+ if n.List != nil {
+ Yyerror("too many arguments to recover")
+ goto error
+ }
+
+ n.Type = Types[TINTER]
+ goto ret
+
+ case OCLOSURE:
+ ok |= Erv
+ typecheckclosure(n, top)
+ if n.Type == nil {
+ goto error
+ }
+ goto ret
+
+ case OITAB:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ t = n.Left.Type
+ if t == nil {
+ goto error
+ }
+ if t.Etype != TINTER {
+ Fatal("OITAB of %v", Tconv(t, 0))
+ }
+ n.Type = Ptrto(Types[TUINTPTR])
+ goto ret
+
+ case OSPTR:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ t = n.Left.Type
+ if t == nil {
+ goto error
+ }
+ if !(Isslice(t) != 0) && t.Etype != TSTRING {
+ Fatal("OSPTR of %v", Tconv(t, 0))
+ }
+ if t.Etype == TSTRING {
+ n.Type = Ptrto(Types[TUINT8])
+ } else {
+ n.Type = Ptrto(t.Type)
+ }
+ goto ret
+
+ case OCLOSUREVAR:
+ ok |= Erv
+ goto ret
+
+ case OCFUNC:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ n.Type = Types[TUINTPTR]
+ goto ret
+
+ case OCONVNOP:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ goto ret
+
+ /*
+ * statements
+ */
+ case OAS:
+ ok |= Etop
+
+ typecheckas(n)
+
+ // Code that creates temps does not bother to set defn, so do it here.
+ if n.Left.Op == ONAME && strings.HasPrefix(n.Left.Sym.Name, "autotmp_") {
+ n.Left.Defn = n
+ }
+ goto ret
+
+ case OAS2:
+ ok |= Etop
+ typecheckas2(n)
+ goto ret
+
+ case OBREAK,
+ OCONTINUE,
+ ODCL,
+ OEMPTY,
+ OGOTO,
+ OXFALL,
+ OVARKILL:
+ ok |= Etop
+ goto ret
+
+ case OLABEL:
+ ok |= Etop
+ decldepth++
+ goto ret
+
+ case ODEFER:
+ ok |= Etop
+ typecheck(&n.Left, Etop|Erv)
+ if !(n.Left.Diag != 0) {
+ checkdefergo(n)
+ }
+ goto ret
+
+ case OPROC:
+ ok |= Etop
+ typecheck(&n.Left, Etop|Eproc|Erv)
+ checkdefergo(n)
+ goto ret
+
+ case OFOR:
+ ok |= Etop
+ typechecklist(n.Ninit, Etop)
+ decldepth++
+ typecheck(&n.Ntest, Erv)
+ if n.Ntest != nil {
+ t = n.Ntest.Type
+ if t != nil && t.Etype != TBOOL {
+ Yyerror("non-bool %v used as for condition", Nconv(n.Ntest, obj.FmtLong))
+ }
+ }
+ typecheck(&n.Nincr, Etop)
+ typechecklist(n.Nbody, Etop)
+ decldepth--
+ goto ret
+
+ case OIF:
+ ok |= Etop
+ typechecklist(n.Ninit, Etop)
+ typecheck(&n.Ntest, Erv)
+ if n.Ntest != nil {
+ t = n.Ntest.Type
+ if t != nil && t.Etype != TBOOL {
+ Yyerror("non-bool %v used as if condition", Nconv(n.Ntest, obj.FmtLong))
+ }
+ }
+ typechecklist(n.Nbody, Etop)
+ typechecklist(n.Nelse, Etop)
+ goto ret
+
+ case ORETURN:
+ ok |= Etop
+ if count(n.List) == 1 {
+ typechecklist(n.List, Erv|Efnstruct)
+ } else {
+ typechecklist(n.List, Erv)
+ }
+ if Curfn == nil {
+ Yyerror("return outside function")
+ goto error
+ }
+
+ if Curfn.Type.Outnamed != 0 && n.List == nil {
+ goto ret
+ }
+ typecheckaste(ORETURN, nil, 0, getoutargx(Curfn.Type), n.List, "return argument")
+ goto ret
+
+ case ORETJMP:
+ ok |= Etop
+ goto ret
+
+ case OSELECT:
+ ok |= Etop
+ typecheckselect(n)
+ goto ret
+
+ case OSWITCH:
+ ok |= Etop
+ typecheckswitch(n)
+ goto ret
+
+ case ORANGE:
+ ok |= Etop
+ typecheckrange(n)
+ goto ret
+
+ case OTYPESW:
+ Yyerror("use of .(type) outside type switch")
+ goto error
+
+ case OXCASE:
+ ok |= Etop
+ typechecklist(n.List, Erv)
+ typechecklist(n.Nbody, Etop)
+ goto ret
+
+ case ODCLFUNC:
+ ok |= Etop
+ typecheckfunc(n)
+ goto ret
+
+ case ODCLCONST:
+ ok |= Etop
+ typecheck(&n.Left, Erv)
+ goto ret
+
+ case ODCLTYPE:
+ ok |= Etop
+ typecheck(&n.Left, Etype)
+ if !(incannedimport != 0) {
+ checkwidth(n.Left.Type)
+ }
+ goto ret
+ }
+
+ goto ret
+
+arith:
+ if op == OLSH || op == ORSH {
+ goto shift
+ }
+
+ // ideal mixed with non-ideal
+ defaultlit2(&l, &r, 0)
+
+ n.Left = l
+ n.Right = r
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ t = l.Type
+ if t.Etype == TIDEAL {
+ t = r.Type
+ }
+ et = int(t.Etype)
+ if et == TIDEAL {
+ et = TINT
+ }
+ aop = 0
+ if iscmp[n.Op] != 0 && t.Etype != TIDEAL && !Eqtype(l.Type, r.Type) {
+ // comparison is okay as long as one side is
+ // assignable to the other. convert so they have
+ // the same type.
+ //
+ // the only conversion that isn't a no-op is concrete == interface.
+ // in that case, check comparability of the concrete type.
+ // The conversion allocates, so only do it if the concrete type is huge.
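+		// (e.g. comparing a concrete value c against an interface i,
+		// i == c wraps c in a conversion to i's type so both operands
+		// end up with the same type)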
+ if r.Type.Etype != TBLANK {
+ aop = assignop(l.Type, r.Type, nil)
+ if aop != 0 {
+ if Isinter(r.Type) != 0 && !(Isinter(l.Type) != 0) && algtype1(l.Type, nil) == ANOEQ {
+ Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(l.Type))
+ goto error
+ }
+
+ dowidth(l.Type)
+ if Isinter(r.Type) == Isinter(l.Type) || l.Type.Width >= 1<<16 {
+ l = Nod(aop, l, nil)
+ l.Type = r.Type
+ l.Typecheck = 1
+ n.Left = l
+ }
+
+ t = r.Type
+ goto converted
+ }
+ }
+
+ if l.Type.Etype != TBLANK {
+ aop = assignop(r.Type, l.Type, nil)
+ if aop != 0 {
+ if Isinter(l.Type) != 0 && !(Isinter(r.Type) != 0) && algtype1(r.Type, nil) == ANOEQ {
+ Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(r.Type))
+ goto error
+ }
+
+ dowidth(r.Type)
+ if Isinter(r.Type) == Isinter(l.Type) || r.Type.Width >= 1<<16 {
+ r = Nod(aop, r, nil)
+ r.Type = l.Type
+ r.Typecheck = 1
+ n.Right = r
+ }
+
+ t = l.Type
+ }
+ }
+
+ converted:
+ et = int(t.Etype)
+ }
+
+ if t.Etype != TIDEAL && !Eqtype(l.Type, r.Type) {
+ defaultlit2(&l, &r, 1)
+ if n.Op == OASOP && n.Implicit != 0 {
+ Yyerror("invalid operation: %v (non-numeric type %v)", Nconv(n, 0), Tconv(l.Type, 0))
+ goto error
+ }
+
+ if Isinter(r.Type) == Isinter(l.Type) || aop == 0 {
+ Yyerror("invalid operation: %v (mismatched types %v and %v)", Nconv(n, 0), Tconv(l.Type, 0), Tconv(r.Type, 0))
+ goto error
+ }
+ }
+
+ if !(okfor[op][et] != 0) {
+ Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(t))
+ goto error
+ }
+
+ // okfor allows any array == array, map == map, func == func.
+ // restrict to slice/map/func == nil and nil == slice/map/func.
+ if Isfixedarray(l.Type) != 0 && algtype1(l.Type, nil) == ANOEQ {
+ Yyerror("invalid operation: %v (%v cannot be compared)", Nconv(n, 0), Tconv(l.Type, 0))
+ goto error
+ }
+
+ if Isslice(l.Type) != 0 && !(isnil(l) != 0) && !(isnil(r) != 0) {
+ Yyerror("invalid operation: %v (slice can only be compared to nil)", Nconv(n, 0))
+ goto error
+ }
+
+ if l.Type.Etype == TMAP && !(isnil(l) != 0) && !(isnil(r) != 0) {
+ Yyerror("invalid operation: %v (map can only be compared to nil)", Nconv(n, 0))
+ goto error
+ }
+
+ if l.Type.Etype == TFUNC && !(isnil(l) != 0) && !(isnil(r) != 0) {
+ Yyerror("invalid operation: %v (func can only be compared to nil)", Nconv(n, 0))
+ goto error
+ }
+
+ if l.Type.Etype == TSTRUCT && algtype1(l.Type, &badtype) == ANOEQ {
+ Yyerror("invalid operation: %v (struct containing %v cannot be compared)", Nconv(n, 0), Tconv(badtype, 0))
+ goto error
+ }
+
+ t = l.Type
+ if iscmp[n.Op] != 0 {
+ evconst(n)
+ t = idealbool
+ if n.Op != OLITERAL {
+ defaultlit2(&l, &r, 1)
+ n.Left = l
+ n.Right = r
+ }
+ } else if n.Op == OANDAND || n.Op == OOROR {
+ if l.Type == r.Type {
+ t = l.Type
+ } else if l.Type == idealbool {
+ t = r.Type
+ } else if r.Type == idealbool {
+ t = l.Type
+ }
+ } else
+ // non-comparison operators on ideal bools should make them lose their ideal-ness
+ if t == idealbool {
+ t = Types[TBOOL]
+ }
+
+ if et == TSTRING {
+ if iscmp[n.Op] != 0 {
+ n.Etype = n.Op
+ n.Op = OCMPSTR
+ } else if n.Op == OADD {
+ // create OADDSTR node with list of strings in x + y + z + (w + v) + ...
+ n.Op = OADDSTR
+
+ if l.Op == OADDSTR {
+ n.List = l.List
+ } else {
+ n.List = list1(l)
+ }
+ if r.Op == OADDSTR {
+ n.List = concat(n.List, r.List)
+ } else {
+ n.List = list(n.List, r)
+ }
+ n.Left = nil
+ n.Right = nil
+ }
+ }
+
+ if et == TINTER {
+ if l.Op == OLITERAL && l.Val.Ctype == CTNIL {
+ // swap for back end
+ n.Left = r
+
+ n.Right = l
+ } else if r.Op == OLITERAL && r.Val.Ctype == CTNIL {
+ } else // leave alone for back end
+ if Isinter(r.Type) == Isinter(l.Type) {
+ n.Etype = n.Op
+ n.Op = OCMPIFACE
+ }
+ }
+
+ if (op == ODIV || op == OMOD) && Isconst(r, CTINT) != 0 {
+ if mpcmpfixc(r.Val.U.Xval, 0) == 0 {
+ Yyerror("division by zero")
+ goto error
+ }
+ }
+
+ n.Type = t
+ goto ret
+
+shift:
+ defaultlit(&r, Types[TUINT])
+ n.Right = r
+ t = r.Type
+ if !(Isint[t.Etype] != 0) || Issigned[t.Etype] != 0 {
+ Yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", Nconv(n, 0), Tconv(r.Type, 0))
+ goto error
+ }
+
+ t = l.Type
+ if t != nil && t.Etype != TIDEAL && !(Isint[t.Etype] != 0) {
+ Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ // no defaultlit for left
+ // the outer context gives the type
+ n.Type = l.Type
+
+ goto ret
+
+doconv:
+ ok |= Erv
+ saveorignode(n)
+ typecheck(&n.Left, Erv|top&(Eindir|Eiota))
+ convlit1(&n.Left, n.Type, 1)
+ t = n.Left.Type
+ if t == nil || n.Type == nil {
+ goto error
+ }
+ n.Op = uint8(convertop(t, n.Type, &why))
+	if n.Op == 0 {
+ if !(n.Diag != 0) && !(n.Type.Broke != 0) {
+ Yyerror("cannot convert %v to type %v%s", Nconv(n.Left, obj.FmtLong), Tconv(n.Type, 0), why)
+ n.Diag = 1
+ }
+
+ n.Op = OCONV
+ }
+
+ switch n.Op {
+ case OCONVNOP:
+ if n.Left.Op == OLITERAL && n.Type != Types[TBOOL] {
+ r = Nod(OXXX, nil, nil)
+ n.Op = OCONV
+ n.Orig = r
+ *r = *n
+ n.Op = OLITERAL
+ n.Val = n.Left.Val
+ }
+
+ // do not use stringtoarraylit.
+ // generated code and compiler memory footprint is better without it.
+ case OSTRARRAYBYTE:
+ break
+
+ case OSTRARRAYRUNE:
+ if n.Left.Op == OLITERAL {
+ stringtoarraylit(&n)
+ }
+ }
+
+ goto ret
+
+ret:
+ t = n.Type
+ if t != nil && !(t.Funarg != 0) && n.Op != OTYPE {
+ switch t.Etype {
+		case TFUNC, // might have TANY; wait until it's called
+ TANY,
+ TFORW,
+ TIDEAL,
+ TNIL,
+ TBLANK:
+ break
+
+ default:
+ checkwidth(t)
+ }
+ }
+
+ if safemode != 0 && !(incannedimport != 0) && !(importpkg != nil) && !(compiling_wrappers != 0) && t != nil && t.Etype == TUNSAFEPTR {
+ Yyerror("cannot use unsafe.Pointer")
+ }
+
+ evconst(n)
+ if n.Op == OTYPE && !(top&Etype != 0) {
+ Yyerror("type %v is not an expression", Tconv(n.Type, 0))
+ goto error
+ }
+
+ if top&(Erv|Etype) == Etype && n.Op != OTYPE {
+ Yyerror("%v is not a type", Nconv(n, 0))
+ goto error
+ }
+
+ // TODO(rsc): simplify
+ if (top&(Ecall|Erv|Etype) != 0) && !(top&Etop != 0) && !(ok&(Erv|Etype|Ecall) != 0) {
+ Yyerror("%v used as value", Nconv(n, 0))
+ goto error
+ }
+
+ if (top&Etop != 0) && !(top&(Ecall|Erv|Etype) != 0) && !(ok&Etop != 0) {
+ if n.Diag == 0 {
+ Yyerror("%v evaluated but not used", Nconv(n, 0))
+ n.Diag = 1
+ }
+
+ goto error
+ }
+
+ /* TODO
+ if(n->type == T)
+ fatal("typecheck nil type");
+ */
+ goto out
+
+badcall1:
+ Yyerror("invalid argument %v for %v", Nconv(n.Left, obj.FmtLong), Oconv(int(n.Op), 0))
+ goto error
+
+error:
+ n.Type = nil
+
+out:
+ *np = n
+}
+
+func checksliceindex(l *Node, r *Node, tp *Type) int {
+ var t *Type
+
+ t = r.Type
+ if t == nil {
+ return -1
+ }
+ if !(Isint[t.Etype] != 0) {
+ Yyerror("invalid slice index %v (type %v)", Nconv(r, 0), Tconv(t, 0))
+ return -1
+ }
+
+ if r.Op == OLITERAL {
+ if Mpgetfix(r.Val.U.Xval) < 0 {
+ Yyerror("invalid slice index %v (index must be non-negative)", Nconv(r, 0))
+ return -1
+ } else if tp != nil && tp.Bound > 0 && Mpgetfix(r.Val.U.Xval) > tp.Bound {
+ Yyerror("invalid slice index %v (out of bounds for %d-element array)", Nconv(r, 0), tp.Bound)
+ return -1
+ } else if Isconst(l, CTSTR) != 0 && Mpgetfix(r.Val.U.Xval) > int64(len(l.Val.U.Sval.S)) {
+ Yyerror("invalid slice index %v (out of bounds for %d-byte string)", Nconv(r, 0), len(l.Val.U.Sval.S))
+ return -1
+ } else if Mpcmpfixfix(r.Val.U.Xval, Maxintval[TINT]) > 0 {
+ Yyerror("invalid slice index %v (index too large)", Nconv(r, 0))
+ return -1
+ }
+ }
+
+ return 0
+}
+
+func checksliceconst(lo *Node, hi *Node) int {
+ if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && Mpcmpfixfix(lo.Val.U.Xval, hi.Val.U.Xval) > 0 {
+ Yyerror("invalid slice index: %v > %v", Nconv(lo, 0), Nconv(hi, 0))
+ return -1
+ }
+
+ return 0
+}
+
+func checkdefergo(n *Node) {
+ var what string
+
+ what = "defer"
+ if n.Op == OPROC {
+ what = "go"
+ }
+
+ switch n.Left.Op {
+ // ok
+ case OCALLINTER,
+ OCALLMETH,
+ OCALLFUNC,
+ OCLOSE,
+ OCOPY,
+ ODELETE,
+ OPANIC,
+ OPRINT,
+ OPRINTN,
+ ORECOVER:
+ return
+
+ case OAPPEND,
+ OCAP,
+ OCOMPLEX,
+ OIMAG,
+ OLEN,
+ OMAKE,
+ OMAKESLICE,
+ OMAKECHAN,
+ OMAKEMAP,
+ ONEW,
+ OREAL,
+ OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
+ if n.Left.Orig != nil && n.Left.Orig.Op == OCONV {
+ break
+ }
+ Yyerror("%s discards result of %v", what, Nconv(n.Left, 0))
+ return
+ }
+
+ // type is broken or missing, most likely a method call on a broken type
+ // we will warn about the broken type elsewhere. no need to emit a potentially confusing error
+ if n.Left.Type == nil || n.Left.Type.Broke != 0 {
+ return
+ }
+
+ if !(n.Diag != 0) {
+ // The syntax made sure it was a call, so this must be
+ // a conversion.
+ n.Diag = 1
+
+ Yyerror("%s requires function call, not conversion", what)
+ }
+}
+
+func implicitstar(nn **Node) {
+ var t *Type
+ var n *Node
+
+ // insert implicit * if needed for fixed array
+ n = *nn
+
+ t = n.Type
+ if t == nil || !(Isptr[t.Etype] != 0) {
+ return
+ }
+ t = t.Type
+ if t == nil {
+ return
+ }
+ if !(Isfixedarray(t) != 0) {
+ return
+ }
+ n = Nod(OIND, n, nil)
+ n.Implicit = 1
+ typecheck(&n, Erv)
+ *nn = n
+}
+
+func onearg(n *Node, f string, args ...interface{}) int {
+ var p string
+
+ if n.Left != nil {
+ return 0
+ }
+ if n.List == nil {
+ p = fmt.Sprintf(f, args...)
+ Yyerror("missing argument to %s: %v", p, Nconv(n, 0))
+ return -1
+ }
+
+ if n.List.Next != nil {
+ p = fmt.Sprintf(f, args...)
+ Yyerror("too many arguments to %s: %v", p, Nconv(n, 0))
+ n.Left = n.List.N
+ n.List = nil
+ return -1
+ }
+
+ n.Left = n.List.N
+ n.List = nil
+ return 0
+}
+
+func twoarg(n *Node) int {
+ if n.Left != nil {
+ return 0
+ }
+ if n.List == nil {
+ Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), Nconv(n, 0))
+ return -1
+ }
+
+ n.Left = n.List.N
+ if n.List.Next == nil {
+ Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), Nconv(n, 0))
+ n.List = nil
+ return -1
+ }
+
+ if n.List.Next.Next != nil {
+ Yyerror("too many arguments to %v - %v", Oconv(int(n.Op), 0), Nconv(n, 0))
+ n.List = nil
+ return -1
+ }
+
+ n.Right = n.List.Next.N
+ n.List = nil
+ return 0
+}
+
+func lookdot1(errnode *Node, s *Sym, t *Type, f *Type, dostrcmp int) *Type {
+ var r *Type
+
+ r = nil
+ for ; f != nil; f = f.Down {
+ if dostrcmp != 0 && f.Sym.Name == s.Name {
+ return f
+ }
+ if f.Sym != s {
+ continue
+ }
+ if r != nil {
+ if errnode != nil {
+ Yyerror("ambiguous selector %v", Nconv(errnode, 0))
+ } else if Isptr[t.Etype] != 0 {
+ Yyerror("ambiguous selector (%v).%v", Tconv(t, 0), Sconv(s, 0))
+ } else {
+ Yyerror("ambiguous selector %v.%v", Tconv(t, 0), Sconv(s, 0))
+ }
+ break
+ }
+
+ r = f
+ }
+
+ return r
+}
+
+func looktypedot(n *Node, t *Type, dostrcmp int) int {
+ var f1 *Type
+ var f2 *Type
+ var s *Sym
+
+ s = n.Right.Sym
+
+ if t.Etype == TINTER {
+ f1 = lookdot1(n, s, t, t.Type, dostrcmp)
+ if f1 == nil {
+ return 0
+ }
+
+ n.Right = methodname(n.Right, t)
+ n.Xoffset = f1.Width
+ n.Type = f1.Type
+ n.Op = ODOTINTER
+ return 1
+ }
+
+ // Find the base type: methtype will fail if t
+ // is not of the form T or *T.
+ f2 = methtype(t, 0)
+
+ if f2 == nil {
+ return 0
+ }
+
+ expandmeth(f2)
+ f2 = lookdot1(n, s, f2, f2.Xmethod, dostrcmp)
+ if f2 == nil {
+ return 0
+ }
+
+ // disallow T.m if m requires *T receiver
+ if Isptr[getthisx(f2.Type).Type.Type.Etype] != 0 && !(Isptr[t.Etype] != 0) && f2.Embedded != 2 && !(isifacemethod(f2.Type) != 0) {
+ Yyerror("invalid method expression %v (needs pointer receiver: (*%v).%v)", Nconv(n, 0), Tconv(t, 0), Sconv(f2.Sym, obj.FmtShort))
+ return 0
+ }
+
+ n.Right = methodname(n.Right, t)
+ n.Xoffset = f2.Width
+ n.Type = f2.Type
+ n.Op = ODOTMETH
+ return 1
+}
+
+func derefall(t *Type) *Type {
+ for t != nil && int(t.Etype) == Tptr {
+ t = t.Type
+ }
+ return t
+}
+
+func lookdot(n *Node, t *Type, dostrcmp int) int {
+ var f1 *Type
+ var f2 *Type
+ var tt *Type
+ var rcvr *Type
+ var s *Sym
+
+ s = n.Right.Sym
+
+ dowidth(t)
+ f1 = nil
+ if t.Etype == TSTRUCT || t.Etype == TINTER {
+ f1 = lookdot1(n, s, t, t.Type, dostrcmp)
+ }
+
+ f2 = nil
+ if n.Left.Type == t || n.Left.Type.Sym == nil {
+ f2 = methtype(t, 0)
+ if f2 != nil {
+			// Use f2.Method, not f2.Xmethod: adddot has
+			// already inserted all the necessary embedded dots.
+ f2 = lookdot1(n, s, f2, f2.Method, dostrcmp)
+ }
+ }
+
+ if f1 != nil {
+ if f2 != nil {
+ Yyerror("%v is both field and method", Sconv(n.Right.Sym, 0))
+ }
+ if f1.Width == BADWIDTH {
+ Fatal("lookdot badwidth %v %p", Tconv(f1, 0), f1)
+ }
+ n.Xoffset = f1.Width
+ n.Type = f1.Type
+ n.Paramfld = f1
+ if t.Etype == TINTER {
+ if Isptr[n.Left.Type.Etype] != 0 {
+ n.Left = Nod(OIND, n.Left, nil) // implicitstar
+ n.Left.Implicit = 1
+ typecheck(&n.Left, Erv)
+ }
+
+ n.Op = ODOTINTER
+ }
+
+ return 1
+ }
+
+ if f2 != nil {
+ tt = n.Left.Type
+ dowidth(tt)
+ rcvr = getthisx(f2.Type).Type.Type
+ if !Eqtype(rcvr, tt) {
+ if int(rcvr.Etype) == Tptr && Eqtype(rcvr.Type, tt) {
+ checklvalue(n.Left, "call pointer method on")
+ n.Left = Nod(OADDR, n.Left, nil)
+ n.Left.Implicit = 1
+ typecheck(&n.Left, Etype|Erv)
+ } else if int(tt.Etype) == Tptr && int(rcvr.Etype) != Tptr && Eqtype(tt.Type, rcvr) {
+ n.Left = Nod(OIND, n.Left, nil)
+ n.Left.Implicit = 1
+ typecheck(&n.Left, Etype|Erv)
+ } else if int(tt.Etype) == Tptr && int(tt.Type.Etype) == Tptr && Eqtype(derefall(tt), derefall(rcvr)) {
+ Yyerror("calling method %v with receiver %v requires explicit dereference", Nconv(n.Right, 0), Nconv(n.Left, obj.FmtLong))
+ for int(tt.Etype) == Tptr {
+ // Stop one level early for method with pointer receiver.
+ if int(rcvr.Etype) == Tptr && int(tt.Type.Etype) != Tptr {
+ break
+ }
+ n.Left = Nod(OIND, n.Left, nil)
+ n.Left.Implicit = 1
+ typecheck(&n.Left, Etype|Erv)
+ tt = tt.Type
+ }
+ } else {
+ Fatal("method mismatch: %v for %v", Tconv(rcvr, 0), Tconv(tt, 0))
+ }
+ }
+
+ n.Right = methodname(n.Right, n.Left.Type)
+ n.Xoffset = f2.Width
+ n.Type = f2.Type
+
+ // print("lookdot found [%p] %T\n", f2->type, f2->type);
+ n.Op = ODOTMETH
+
+ return 1
+ }
+
+ return 0
+}
+
+func nokeys(l *NodeList) int {
+ for ; l != nil; l = l.Next {
+ if l.N.Op == OKEY {
+ return 0
+ }
+ }
+ return 1
+}
+
+func hasddd(t *Type) int {
+ var tl *Type
+
+ for tl = t.Type; tl != nil; tl = tl.Down {
+ if tl.Isddd != 0 {
+ return 1
+ }
+ }
+
+ return 0
+}
+
+func downcount(t *Type) int {
+ var tl *Type
+ var n int
+
+ n = 0
+ for tl = t.Type; tl != nil; tl = tl.Down {
+ n++
+ }
+
+ return n
+}
+
+/*
+ * typecheck assignment: type list = expression list
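+ *
+ * call is the call expression node (nil for return statements);
+ * isddd reports whether the f(args...) form was used.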
+ */
+func typecheckaste(op int, call *Node, isddd int, tstruct *Type, nl *NodeList, desc string) {
+ var t *Type
+ var tl *Type
+ var tn *Type
+ var n *Node
+ var lno int
+ var why string
+ var n1 int
+ var n2 int
+
+ lno = int(lineno)
+
+ if tstruct.Broke != 0 {
+ goto out
+ }
+
+ n = nil
+ if nl != nil && nl.Next == nil {
+ n = nl.N
+ if n.Type != nil {
+ if n.Type.Etype == TSTRUCT && n.Type.Funarg != 0 {
+ if !(hasddd(tstruct) != 0) {
+ n1 = downcount(tstruct)
+ n2 = downcount(n.Type)
+ if n2 > n1 {
+ goto toomany
+ }
+ if n2 < n1 {
+ goto notenough
+ }
+ }
+
+ tn = n.Type.Type
+ for tl = tstruct.Type; tl != nil; tl = tl.Down {
+ if tl.Isddd != 0 {
+ for ; tn != nil; tn = tn.Down {
+ if assignop(tn.Type, tl.Type.Type, &why) == 0 {
+ if call != nil {
+ Yyerror("cannot use %v as type %v in argument to %v%s", Tconv(tn.Type, 0), Tconv(tl.Type.Type, 0), Nconv(call, 0), why)
+ } else {
+ Yyerror("cannot use %v as type %v in %s%s", Tconv(tn.Type, 0), Tconv(tl.Type.Type, 0), desc, why)
+ }
+ }
+ }
+
+ goto out
+ }
+
+ if tn == nil {
+ goto notenough
+ }
+ if assignop(tn.Type, tl.Type, &why) == 0 {
+ if call != nil {
+ Yyerror("cannot use %v as type %v in argument to %v%s", Tconv(tn.Type, 0), Tconv(tl.Type, 0), Nconv(call, 0), why)
+ } else {
+ Yyerror("cannot use %v as type %v in %s%s", Tconv(tn.Type, 0), Tconv(tl.Type, 0), desc, why)
+ }
+ }
+
+ tn = tn.Down
+ }
+
+ if tn != nil {
+ goto toomany
+ }
+ goto out
+ }
+ }
+ }
+
+ n1 = downcount(tstruct)
+ n2 = count(nl)
+ if !(hasddd(tstruct) != 0) {
+ if n2 > n1 {
+ goto toomany
+ }
+ if n2 < n1 {
+ goto notenough
+ }
+ } else {
+ if !(isddd != 0) {
+ if n2 < n1-1 {
+ goto notenough
+ }
+ } else {
+ if n2 > n1 {
+ goto toomany
+ }
+ if n2 < n1 {
+ goto notenough
+ }
+ }
+ }
+
+ for tl = tstruct.Type; tl != nil; tl = tl.Down {
+ t = tl.Type
+ if tl.Isddd != 0 {
+ if isddd != 0 {
+ if nl == nil {
+ goto notenough
+ }
+ if nl.Next != nil {
+ goto toomany
+ }
+ n = nl.N
+ setlineno(n)
+ if n.Type != nil {
+ nl.N = assignconv(n, t, desc)
+ }
+ goto out
+ }
+
+ for ; nl != nil; nl = nl.Next {
+ n = nl.N
+ setlineno(nl.N)
+ if n.Type != nil {
+ nl.N = assignconv(n, t.Type, desc)
+ }
+ }
+
+ goto out
+ }
+
+ if nl == nil {
+ goto notenough
+ }
+ n = nl.N
+ setlineno(n)
+ if n.Type != nil {
+ nl.N = assignconv(n, t, desc)
+ }
+ nl = nl.Next
+ }
+
+ if nl != nil {
+ goto toomany
+ }
+ if isddd != 0 {
+ if call != nil {
+ Yyerror("invalid use of ... in call to %v", Nconv(call, 0))
+ } else {
+ Yyerror("invalid use of ... in %v", Oconv(int(op), 0))
+ }
+ }
+
+out:
+ lineno = int32(lno)
+ return
+
+notenough:
+ if n == nil || !(n.Diag != 0) {
+ if call != nil {
+ Yyerror("not enough arguments in call to %v", Nconv(call, 0))
+ } else {
+ Yyerror("not enough arguments to %v", Oconv(int(op), 0))
+ }
+ if n != nil {
+ n.Diag = 1
+ }
+ }
+
+ goto out
+
+toomany:
+ if call != nil {
+ Yyerror("too many arguments in call to %v", Nconv(call, 0))
+ } else {
+ Yyerror("too many arguments to %v", Oconv(int(op), 0))
+ }
+ goto out
+}
+
+/*
+ * type check composite
+ */
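+// fielddup, keydup, and indexdup detect duplicates in composite
+// literals by chaining candidate nodes through the otherwise-unused
+// Ntest field as hash buckets.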
+func fielddup(n *Node, hash []*Node) {
+ var h uint
+ var s string
+ var a *Node
+
+ if n.Op != ONAME {
+ Fatal("fielddup: not ONAME")
+ }
+ s = n.Sym.Name
+ h = uint(stringhash(s) % uint32(len(hash)))
+ for a = hash[h]; a != nil; a = a.Ntest {
+ if a.Sym.Name == s {
+ Yyerror("duplicate field name in struct literal: %s", s)
+ return
+ }
+ }
+
+ n.Ntest = hash[h]
+ hash[h] = n
+}
+
+func keydup(n *Node, hash []*Node) {
+ var h uint
+ var b uint32
+ var d float64
+ var i int
+ var a *Node
+ var orign *Node
+ var cmp Node
+ var s string
+
+ orign = n
+ if n.Op == OCONVIFACE {
+ n = n.Left
+ }
+ evconst(n)
+ if n.Op != OLITERAL {
+		return // we don't check variables
+ }
+
+ switch n.Val.Ctype {
+ default: // unknown, bool, nil
+ b = 23
+
+ case CTINT,
+ CTRUNE:
+ b = uint32(Mpgetfix(n.Val.U.Xval))
+
+ case CTFLT:
+ d = mpgetflt(n.Val.U.Fval)
+ x := math.Float64bits(d)
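+		// hash all eight bytes of the float's IEEE 754 bit pattern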
+ for i := 0; i < 8; i++ {
+ b = b*PRIME1 + uint32(x&0xFF)
+ x >>= 8
+ }
+
+ case CTSTR:
+ b = 0
+ s = n.Val.U.Sval.S
+ for i = len(n.Val.U.Sval.S); i > 0; i-- {
+ b = b*PRIME1 + uint32(s[0])
+ s = s[1:]
+ }
+ }
+
+ h = uint(b % uint32(len(hash)))
+ cmp = Node{}
+ for a = hash[h]; a != nil; a = a.Ntest {
+ cmp.Op = OEQ
+ cmp.Left = n
+ b = 0
+ if a.Op == OCONVIFACE && orign.Op == OCONVIFACE {
+ if Eqtype(a.Left.Type, n.Type) {
+ cmp.Right = a.Left
+ evconst(&cmp)
+ b = uint32(cmp.Val.U.Bval)
+ }
+ } else if Eqtype(a.Type, n.Type) {
+ cmp.Right = a
+ evconst(&cmp)
+ b = uint32(cmp.Val.U.Bval)
+ }
+
+ if b != 0 {
+ Yyerror("duplicate key %v in map literal", Nconv(n, 0))
+ return
+ }
+ }
+
+ orign.Ntest = hash[h]
+ hash[h] = orign
+}
+
+func indexdup(n *Node, hash []*Node) {
+ var h uint
+ var a *Node
+ var b uint32
+ var c uint32
+
+ if n.Op != OLITERAL {
+ Fatal("indexdup: not OLITERAL")
+ }
+
+ b = uint32(Mpgetfix(n.Val.U.Xval))
+ h = uint(b % uint32(len(hash)))
+ for a = hash[h]; a != nil; a = a.Ntest {
+ c = uint32(Mpgetfix(a.Val.U.Xval))
+ if b == c {
+ Yyerror("duplicate index in array literal: %d", b)
+ return
+ }
+ }
+
+ n.Ntest = hash[h]
+ hash[h] = n
+}
+
+func prime(h uint32, sr uint32) int {
+ var n uint32
+
+ for n = 3; n <= sr; n += 2 {
+ if h%n == 0 {
+ return 0
+ }
+ }
+ return 1
+}
+
+func inithash(n *Node, autohash []*Node) []*Node {
+ var h uint32
+ var sr uint32
+ var ll *NodeList
+ var i int
+
+ // count the number of entries
+ h = 0
+
+ for ll = n.List; ll != nil; ll = ll.Next {
+ h++
+ }
+
+	// if the auto hash table is
+	// large enough, use it.
+ if h <= uint32(len(autohash)) {
+ for i := range autohash {
+ autohash[i] = nil
+ }
+ return autohash
+ }
+
+	// make hash size odd and roughly 12% (h/8) larger than the entry count
+ h += h / 8
+
+ h |= 1
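+	// e.g. 100 entries: 100 + 100/8 = 112, made odd = 113, which is prime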
+
+ // calculate sqrt of h
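+	// (five Newton iterations from h/2 give an integer approximation,
+	// used as the trial-divisor bound in the primality check below)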
+ sr = h / 2
+
+ for i = 0; i < 5; i++ {
+ sr = (sr + h/sr) / 2
+ }
+
+	// check for primality
+ for !(prime(h, sr) != 0) {
+ h += 2
+ }
+
+ // build and return a throw-away hash table
+ return make([]*Node, h)
+}
+
+func iscomptype(t *Type) int {
+ switch t.Etype {
+ case TARRAY,
+ TSTRUCT,
+ TMAP:
+ return 1
+
+ case TPTR32,
+ TPTR64:
+ switch t.Type.Etype {
+ case TARRAY,
+ TSTRUCT,
+ TMAP:
+ return 1
+ }
+ }
+
+ return 0
+}
+
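+// pushtype propagates the expected composite type t into an untyped
+// composite literal n: for example, the inner {1} in [][]int{{1}}
+// is given the implicit type []int.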
+func pushtype(n *Node, t *Type) {
+ if n == nil || n.Op != OCOMPLIT || !(iscomptype(t) != 0) {
+ return
+ }
+
+ if n.Right == nil {
+ n.Right = typenod(t)
+ n.Implicit = 1 // don't print
+ n.Right.Implicit = 1 // * is okay
+ } else if Debug['s'] != 0 {
+ typecheck(&n.Right, Etype)
+ if n.Right.Type != nil && Eqtype(n.Right.Type, t) {
+ fmt.Printf("%v: redundant type: %v\n", n.Line(), Tconv(t, 0))
+ }
+ }
+}
+
+func typecheckcomplit(np **Node) {
+ var bad int
+ var i int
+ var nerr int
+ var length int64
+ var l *Node
+ var n *Node
+ var norig *Node
+ var r *Node
+ var hash []*Node
+ var ll *NodeList
+ var t *Type
+ var f *Type
+ var s *Sym
+ var s1 *Sym
+ var lno int32
+ var autohash [101]*Node
+
+ n = *np
+ lno = lineno
+
+ if n.Right == nil {
+ if n.List != nil {
+ setlineno(n.List.N)
+ }
+ Yyerror("missing type in composite literal")
+ goto error
+ }
+
+	// Save original node (including n.Right)
+ norig = Nod(int(n.Op), nil, nil)
+
+ *norig = *n
+
+ setlineno(n.Right)
+ l = typecheck(&n.Right, Etype|Ecomplit) /* sic */
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ nerr = nerrors
+ n.Type = t
+
+ if Isptr[t.Etype] != 0 {
+ // For better or worse, we don't allow pointers as the composite literal type,
+ // except when using the &T syntax, which sets implicit on the OIND.
+ if !(n.Right.Implicit != 0) {
+ Yyerror("invalid pointer type %v for composite literal (use &%v instead)", Tconv(t, 0), Tconv(t.Type, 0))
+ goto error
+ }
+
+ // Also, the underlying type must be a struct, map, slice, or array.
+ if !(iscomptype(t) != 0) {
+ Yyerror("invalid pointer type %v for composite literal", Tconv(t, 0))
+ goto error
+ }
+
+ t = t.Type
+ }
+
+ switch t.Etype {
+ default:
+ Yyerror("invalid type for composite literal: %v", Tconv(t, 0))
+ n.Type = nil
+
+ case TARRAY:
+ hash = inithash(n, autohash[:])
+
+ length = 0
+ i = 0
+ for ll = n.List; ll != nil; ll = ll.Next {
+ l = ll.N
+ setlineno(l)
+ if l.Op != OKEY {
+ l = Nod(OKEY, Nodintconst(int64(i)), l)
+ l.Left.Type = Types[TINT]
+ l.Left.Typecheck = 1
+ ll.N = l
+ }
+
+ typecheck(&l.Left, Erv)
+ evconst(l.Left)
+ i = nonnegconst(l.Left)
+ if i < 0 && !(l.Left.Diag != 0) {
+ Yyerror("array index must be non-negative integer constant")
+ l.Left.Diag = 1
+ i = -(1 << 30) // stay negative for a while
+ }
+
+ if i >= 0 {
+ indexdup(l.Left, hash)
+ }
+ i++
+ if int64(i) > length {
+ length = int64(i)
+ if t.Bound >= 0 && length > t.Bound {
+ setlineno(l)
+ Yyerror("array index %d out of bounds [0:%d]", length-1, t.Bound)
+ t.Bound = -1 // no more errors
+ }
+ }
+
+ r = l.Right
+ pushtype(r, t.Type)
+ typecheck(&r, Erv)
+ defaultlit(&r, t.Type)
+ l.Right = assignconv(r, t.Type, "array element")
+ }
+
+ if t.Bound == -100 {
+ t.Bound = length
+ }
+ if t.Bound < 0 {
+ n.Right = Nodintconst(length)
+ }
+ n.Op = OARRAYLIT
+
+ case TMAP:
+ hash = inithash(n, autohash[:])
+
+ for ll = n.List; ll != nil; ll = ll.Next {
+ l = ll.N
+ setlineno(l)
+ if l.Op != OKEY {
+ typecheck(&ll.N, Erv)
+ Yyerror("missing key in map literal")
+ continue
+ }
+
+ typecheck(&l.Left, Erv)
+ defaultlit(&l.Left, t.Down)
+ l.Left = assignconv(l.Left, t.Down, "map key")
+ if l.Left.Op != OCONV {
+ keydup(l.Left, hash)
+ }
+
+ r = l.Right
+ pushtype(r, t.Type)
+ typecheck(&r, Erv)
+ defaultlit(&r, t.Type)
+ l.Right = assignconv(r, t.Type, "map value")
+ }
+
+ n.Op = OMAPLIT
+
+ case TSTRUCT:
+ bad = 0
+ if n.List != nil && nokeys(n.List) != 0 {
+ // simple list of variables
+ f = t.Type
+
+ for ll = n.List; ll != nil; ll = ll.Next {
+ setlineno(ll.N)
+ typecheck(&ll.N, Erv)
+ if f == nil {
+ tmp12 := bad
+ bad++
+ if !(tmp12 != 0) {
+ Yyerror("too many values in struct initializer")
+ }
+ continue
+ }
+
+ s = f.Sym
+ if s != nil && !exportname(s.Name) && s.Pkg != localpkg {
+ Yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, Tconv(t, 0))
+ }
+
+ // No pushtype allowed here. Must name fields for that.
+ ll.N = assignconv(ll.N, f.Type, "field value")
+
+ ll.N = Nod(OKEY, newname(f.Sym), ll.N)
+ ll.N.Left.Type = f
+ ll.N.Left.Typecheck = 1
+ f = f.Down
+ }
+
+ if f != nil {
+ Yyerror("too few values in struct initializer")
+ }
+ } else {
+ hash = inithash(n, autohash[:])
+
+ // keyed list
+ for ll = n.List; ll != nil; ll = ll.Next {
+ l = ll.N
+ setlineno(l)
+ if l.Op != OKEY {
+ tmp13 := bad
+ bad++
+ if !(tmp13 != 0) {
+ Yyerror("mixture of field:value and value initializers")
+ }
+ typecheck(&ll.N, Erv)
+ continue
+ }
+
+ s = l.Left.Sym
+ if s == nil {
+ Yyerror("invalid field name %v in struct initializer", Nconv(l.Left, 0))
+ typecheck(&l.Right, Erv)
+ continue
+ }
+
+ // Sym might have resolved to name in other top-level
+ // package, because of import dot. Redirect to correct sym
+ // before we do the lookup.
+ if s.Pkg != localpkg && exportname(s.Name) {
+ s1 = Lookup(s.Name)
+ if s1.Origpkg == s.Pkg {
+ s = s1
+ }
+ }
+
+ f = lookdot1(nil, s, t, t.Type, 0)
+ if f == nil {
+ Yyerror("unknown %v field '%v' in struct literal", Tconv(t, 0), Sconv(s, 0))
+ continue
+ }
+
+ l.Left = newname(s)
+ l.Left.Typecheck = 1
+ l.Left.Type = f
+ s = f.Sym
+ fielddup(newname(s), hash)
+ r = l.Right
+
+ // No pushtype allowed here. Tried and rejected.
+ typecheck(&r, Erv)
+
+ l.Right = assignconv(r, f.Type, "field value")
+ }
+ }
+
+ n.Op = OSTRUCTLIT
+ }
+
+ if nerr != nerrors {
+ goto error
+ }
+
+ n.Orig = norig
+ if Isptr[n.Type.Etype] != 0 {
+ n = Nod(OPTRLIT, n, nil)
+ n.Typecheck = 1
+ n.Type = n.Left.Type
+ n.Left.Type = t
+ n.Left.Typecheck = 1
+ }
+
+ n.Orig = norig
+ *np = n
+ lineno = lno
+ return
+
+error:
+ n.Type = nil
+ *np = n
+ lineno = lno
+}
+
+/*
+ * lvalue etc
+ */
+func islvalue(n *Node) int {
+ switch n.Op {
+ case OINDEX:
+ if Isfixedarray(n.Left.Type) != 0 {
+ return islvalue(n.Left)
+ }
+ if n.Left.Type != nil && n.Left.Type.Etype == TSTRING {
+ return 0
+ }
+ fallthrough
+ case OIND,
+ ODOTPTR,
+ OCLOSUREVAR,
+ OPARAM:
+ return 1
+
+ case ODOT:
+ return islvalue(n.Left)
+
+ case ONAME:
+ if n.Class == PFUNC {
+ return 0
+ }
+ return 1
+ }
+
+ return 0
+}
+
+func checklvalue(n *Node, verb string) {
+ if !(islvalue(n) != 0) {
+ Yyerror("cannot %s %v", verb, Nconv(n, 0))
+ }
+}
+
+func checkassign(stmt *Node, n *Node) {
+ var r *Node
+ var l *Node
+
+ // Variables declared in ORANGE are assigned on every iteration.
+ if n.Defn != stmt || stmt.Op == ORANGE {
+ r = outervalue(n)
+ for l = n; l != r; l = l.Left {
+ l.Assigned = 1
+ if l.Closure != nil {
+ l.Closure.Assigned = 1
+ }
+ }
+
+ l.Assigned = 1
+ if l.Closure != nil {
+ l.Closure.Assigned = 1
+ }
+ }
+
+ if islvalue(n) != 0 {
+ return
+ }
+ if n.Op == OINDEXMAP {
+ n.Etype = 1
+ return
+ }
+
+ // have already complained about n being undefined
+ if n.Op == ONONAME {
+ return
+ }
+
+ Yyerror("cannot assign to %v", Nconv(n, 0))
+}
+
+func checkassignlist(stmt *Node, l *NodeList) {
+ for ; l != nil; l = l.Next {
+ checkassign(stmt, l.N)
+ }
+}
+
+// Check whether l and r are the same side effect-free expression,
+// so that it is safe to reuse one instead of computing both.
+func samesafeexpr(l *Node, r *Node) int {
+ if l.Op != r.Op || !Eqtype(l.Type, r.Type) {
+ return 0
+ }
+
+ switch l.Op {
+ case ONAME,
+ OCLOSUREVAR:
+ return bool2int(l == r)
+
+ case ODOT,
+ ODOTPTR:
+ return bool2int(l.Right != nil && r.Right != nil && l.Right.Sym == r.Right.Sym && samesafeexpr(l.Left, r.Left) != 0)
+
+ case OIND:
+ return samesafeexpr(l.Left, r.Left)
+
+ case OINDEX:
+ return bool2int(samesafeexpr(l.Left, r.Left) != 0 && samesafeexpr(l.Right, r.Right) != 0)
+ }
+
+ return 0
+}
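+
+// For example, the two x.f occurrences in "x.f = x.f[0:y]" are
+// samesafeexpr, but two written-out f().x expressions are not, because
+// OCALLFUNC falls to the default above: a call is not side effect-free.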
+
+/*
+ * type check assignment.
+ * if this assignment is the definition of a var on the left side,
+ * fill in the var's type.
+ */
+func typecheckas(n *Node) {
+ // delicate little dance.
+ // the definition of n may refer to this assignment
+ // as its definition, in which case it will call typecheckas.
+ // in that case, do not call typecheck back, or it will cycle.
+ // if the variable has a type (ntype) then typechecking
+ // will not look at defn, so it is okay (and desirable,
+ // so that the conversion below happens).
+ n.Left = resolve(n.Left)
+
+ if n.Left.Defn != n || n.Left.Ntype != nil {
+ typecheck(&n.Left, Erv|Easgn)
+ }
+
+ typecheck(&n.Right, Erv)
+ checkassign(n, n.Left)
+ if n.Right != nil && n.Right.Type != nil {
+ if n.Left.Type != nil {
+ n.Right = assignconv(n.Right, n.Left.Type, "assignment")
+ }
+ }
+
+ if n.Left.Defn == n && n.Left.Ntype == nil {
+ defaultlit(&n.Right, nil)
+ n.Left.Type = n.Right.Type
+ }
+
+ // second half of dance.
+ // now that right is done, typecheck the left
+ // just to get it over with. see dance above.
+ n.Typecheck = 1
+
+ if n.Left.Typecheck == 0 {
+ typecheck(&n.Left, Erv|Easgn)
+ }
+
+ // Recognize slices being updated in place, for better code generation later.
+ // Don't rewrite if using race detector, to avoid needing to teach race detector
+ // about this optimization.
+ if n.Left != nil && n.Left.Op != OINDEXMAP && n.Right != nil && !(flag_race != 0) {
+ switch n.Right.Op {
+ // For x = x[0:y], x can be updated in place, without touching pointer.
+ // TODO(rsc): Reenable once it is actually updated in place without touching the pointer.
+ case OSLICE,
+ OSLICE3,
+ OSLICESTR:
+ if false && samesafeexpr(n.Left, n.Right.Left) != 0 && (n.Right.Right.Left == nil || iszero(n.Right.Right.Left) != 0) {
+ n.Right.Reslice = 1
+ }
+
+ // For x = append(x, ...), x can be updated in place when there is capacity,
+ // without touching the pointer; otherwise the emitted code to growslice
+ // can take care of updating the pointer, and only in that case.
+ // TODO(rsc): Reenable once the emitted code does update the pointer.
+ case OAPPEND:
+ if false && n.Right.List != nil && samesafeexpr(n.Left, n.Right.List.N) != 0 {
+ n.Right.Reslice = 1
+ }
+ }
+ }
+}
+
+func checkassignto(src *Type, dst *Node) {
+ var why string
+
+ if assignop(src, dst.Type, &why) == 0 {
+ Yyerror("cannot assign %v to %v in multiple assignment%s", Tconv(src, 0), Nconv(dst, obj.FmtLong), why)
+ return
+ }
+}
+
+func typecheckas2(n *Node) {
+ var cl int
+ var cr int
+ var ll *NodeList
+ var lr *NodeList
+ var l *Node
+ var r *Node
+ var s Iter
+ var t *Type
+
+ for ll = n.List; ll != nil; ll = ll.Next {
+ // delicate little dance.
+ ll.N = resolve(ll.N)
+
+ if ll.N.Defn != n || ll.N.Ntype != nil {
+ typecheck(&ll.N, Erv|Easgn)
+ }
+ }
+
+ cl = count(n.List)
+ cr = count(n.Rlist)
+ if cl > 1 && cr == 1 {
+ typecheck(&n.Rlist.N, Erv|Efnstruct)
+ } else {
+ typechecklist(n.Rlist, Erv)
+ }
+ checkassignlist(n, n.List)
+
+ if cl == cr {
+ // easy
+ ll = n.List
+ lr = n.Rlist
+ for ; ll != nil; ll, lr = ll.Next, lr.Next {
+ if ll.N.Type != nil && lr.N.Type != nil {
+ lr.N = assignconv(lr.N, ll.N.Type, "assignment")
+ }
+ if ll.N.Defn == n && ll.N.Ntype == nil {
+ defaultlit(&lr.N, nil)
+ ll.N.Type = lr.N.Type
+ }
+ }
+
+ goto out
+ }
+
+ l = n.List.N
+ r = n.Rlist.N
+
+ // x,y,z = f()
+ if cr == 1 {
+ if r.Type == nil {
+ goto out
+ }
+ switch r.Op {
+ case OCALLMETH,
+ OCALLINTER,
+ OCALLFUNC:
+ if r.Type.Etype != TSTRUCT || r.Type.Funarg == 0 {
+ break
+ }
+ cr = structcount(r.Type)
+ if cr != cl {
+ goto mismatch
+ }
+ n.Op = OAS2FUNC
+ t = Structfirst(&s, &r.Type)
+ for ll = n.List; ll != nil; ll = ll.Next {
+ if t.Type != nil && ll.N.Type != nil {
+ checkassignto(t.Type, ll.N)
+ }
+ if ll.N.Defn == n && ll.N.Ntype == nil {
+ ll.N.Type = t.Type
+ }
+ t = structnext(&s)
+ }
+
+ goto out
+ }
+ }
+
+ // x, ok = y
+ if cl == 2 && cr == 1 {
+ if r.Type == nil {
+ goto out
+ }
+ switch r.Op {
+ case OINDEXMAP,
+ ORECV,
+ ODOTTYPE:
+ switch r.Op {
+ case OINDEXMAP:
+ n.Op = OAS2MAPR
+
+ case ORECV:
+ n.Op = OAS2RECV
+
+ case ODOTTYPE:
+ n.Op = OAS2DOTTYPE
+ r.Op = ODOTTYPE2
+ }
+
+ if l.Type != nil {
+ checkassignto(r.Type, l)
+ }
+ if l.Defn == n {
+ l.Type = r.Type
+ }
+ l = n.List.Next.N
+ if l.Type != nil && l.Type.Etype != TBOOL {
+ checkassignto(Types[TBOOL], l)
+ }
+ if l.Defn == n && l.Ntype == nil {
+ l.Type = Types[TBOOL]
+ }
+ goto out
+ }
+ }
+
+mismatch:
+ Yyerror("assignment count mismatch: %d = %d", cl, cr)
+
+ // second half of dance
+out:
+ n.Typecheck = 1
+
+ for ll = n.List; ll != nil; ll = ll.Next {
+ if ll.N.Typecheck == 0 {
+ typecheck(&ll.N, Erv|Easgn)
+ }
+ }
+}
+
+/*
+ * type check function definition
+ */
+func typecheckfunc(n *Node) {
+ var t *Type
+ var rcvr *Type
+ var l *NodeList
+
+ typecheck(&n.Nname, Erv|Easgn)
+ t = n.Nname.Type
+ if t == nil {
+ return
+ }
+ n.Type = t
+ t.Nname = n.Nname
+ rcvr = getthisx(t).Type
+ if rcvr != nil && n.Shortname != nil && !isblank(n.Shortname) {
+ addmethod(n.Shortname.Sym, t, true, n.Nname.Nointerface)
+ }
+
+ for l = n.Dcl; l != nil; l = l.Next {
+ if l.N.Op == ONAME && (l.N.Class == PPARAM || l.N.Class == PPARAMOUT) {
+ l.N.Decldepth = 1
+ }
+ }
+}
+
+func stringtoarraylit(np **Node) {
+ n := *np
+ if n.Left.Op != OLITERAL || n.Left.Val.Ctype != CTSTR {
+ Fatal("stringtoarraylit %N", n)
+ }
+
+ s := n.Left.Val.U.Sval.S
+ var l *NodeList
+ if n.Type.Type.Etype == TUINT8 {
+ // []byte
+ for i := 0; i < len(s); i++ {
+ l = list(l, Nod(OKEY, Nodintconst(int64(i)), Nodintconst(int64(s[i]))))
+ }
+ } else {
+ // []rune
+ i := 0
+ for _, r := range s {
+ l = list(l, Nod(OKEY, Nodintconst(int64(i)), Nodintconst(int64(r))))
+ i++
+ }
+ }
+
+ nn := Nod(OCOMPLIT, nil, typenod(n.Type))
+ nn.List = l
+ typecheck(&nn, Erv)
+ *np = nn
+}
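+
+// For example, a conversion of a constant string such as []byte("hi")
+// is rewritten here into the equivalent keyed composite literal
+// []byte{0: 'h', 1: 'i'} and then typechecked again.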
+
+var ntypecheckdeftype int
+
+var methodqueue *NodeList
+
+func domethod(n *Node) {
+ var nt *Node
+ var t *Type
+
+ nt = n.Type.Nname
+ typecheck(&nt, Etype)
+ if nt.Type == nil {
+ // type check failed; leave empty func
+ n.Type.Etype = TFUNC
+
+ n.Type.Nod = nil
+ return
+ }
+
+ // If we have
+ // type I interface {
+ // M(_ int)
+ // }
+ // then even though I.M looks like it doesn't care about the
+ // value of its argument, a specific implementation of I may
+ // care. The _ would suppress the assignment to that argument
+ // while generating a call, so remove it.
+ for t = getinargx(nt.Type).Type; t != nil; t = t.Down {
+ if t.Sym != nil && t.Sym.Name == "_" {
+ t.Sym = nil
+ }
+ }
+
+ *n.Type = *nt.Type
+ n.Type.Nod = nil
+ checkwidth(n.Type)
+}
+
+var mapqueue *NodeList
+
+func copytype(n *Node, t *Type) {
+ var maplineno int
+ var embedlineno int
+ var lno int
+ var l *NodeList
+
+ if t.Etype == TFORW {
+ // This type isn't computed yet; when it is, update n.
+ t.Copyto = list(t.Copyto, n)
+
+ return
+ }
+
+ maplineno = int(n.Type.Maplineno)
+ embedlineno = int(n.Type.Embedlineno)
+
+ l = n.Type.Copyto
+ *n.Type = *t
+
+ t = n.Type
+ t.Sym = n.Sym
+ t.Local = n.Local
+ t.Vargen = n.Vargen
+ t.Siggen = 0
+ t.Method = nil
+ t.Xmethod = nil
+ t.Nod = nil
+ t.Printed = 0
+ t.Deferwidth = 0
+ t.Copyto = nil
+
+ // Update nodes waiting on this type.
+ for ; l != nil; l = l.Next {
+ copytype(l.N, t)
+ }
+
+ // Double-check use of type as embedded type.
+ lno = int(lineno)
+
+ if embedlineno != 0 {
+ lineno = int32(embedlineno)
+ if Isptr[t.Etype] != 0 {
+ Yyerror("embedded type cannot be a pointer")
+ }
+ }
+
+ lineno = int32(lno)
+
+ // Queue check for map until all the types are done settling.
+ if maplineno != 0 {
+ t.Maplineno = int32(maplineno)
+ mapqueue = list(mapqueue, n)
+ }
+}
+
+func typecheckdeftype(n *Node) {
+ var lno int
+ var t *Type
+ var l *NodeList
+
+ ntypecheckdeftype++
+ lno = int(lineno)
+ setlineno(n)
+ n.Type.Sym = n.Sym
+ n.Typecheck = 1
+ typecheck(&n.Ntype, Etype)
+ t = n.Ntype.Type
+ if t == nil {
+ n.Diag = 1
+ n.Type = nil
+ goto ret
+ }
+
+ if n.Type == nil {
+ n.Diag = 1
+ goto ret
+ }
+
+ // copy new type and clear fields
+ // that don't come along.
+ // anything zeroed here must be zeroed in
+ // typedcl2 too.
+ copytype(n, t)
+
+ret:
+ lineno = int32(lno)
+
+ // if there are no type definitions going on, it's safe to
+ // try to resolve the method types for the interfaces
+ // we just read.
+ if ntypecheckdeftype == 1 {
+ for {
+ l = methodqueue
+ if !(l != nil) {
+ break
+ }
+ methodqueue = nil
+ for ; l != nil; l = l.Next {
+ domethod(l.N)
+ }
+ }
+
+ for l = mapqueue; l != nil; l = l.Next {
+ lineno = l.N.Type.Maplineno
+ maptype(l.N.Type, Types[TBOOL])
+ }
+
+ lineno = int32(lno)
+ }
+
+ ntypecheckdeftype--
+}
+
+func queuemethod(n *Node) {
+ if ntypecheckdeftype == 0 {
+ domethod(n)
+ return
+ }
+
+ methodqueue = list(methodqueue, n)
+}
+
+func typecheckdef(n *Node) *Node {
+ var lno int
+ var nerrors0 int
+ var e *Node
+ var t *Type
+ var l *NodeList
+
+ lno = int(lineno)
+ setlineno(n)
+
+ if n.Op == ONONAME {
+ if !(n.Diag != 0) {
+ n.Diag = 1
+ if n.Lineno != 0 {
+ lineno = n.Lineno
+ }
+
+ // Note: adderrorname looks for this string and
+ // adds context about the outer expression
+ Yyerror("undefined: %v", Sconv(n.Sym, 0))
+ }
+
+ return n
+ }
+
+ if n.Walkdef == 1 {
+ return n
+ }
+
+ l = new(NodeList)
+ l.N = n
+ l.Next = typecheckdefstack
+ typecheckdefstack = l
+
+ if n.Walkdef == 2 {
+ Flusherrors()
+ fmt.Printf("typecheckdef loop:")
+ for l = typecheckdefstack; l != nil; l = l.Next {
+ fmt.Printf(" %v", Sconv(l.N.Sym, 0))
+ }
+ fmt.Printf("\n")
+ Fatal("typecheckdef loop")
+ }
+
+ n.Walkdef = 2
+
+ if n.Type != nil || n.Sym == nil { // builtin or no name
+ goto ret
+ }
+
+ switch n.Op {
+ default:
+ Fatal("typecheckdef %v", Oconv(int(n.Op), 0))
+ fallthrough
+
+ // not really syms
+ case OGOTO,
+ OLABEL:
+ break
+
+ case OLITERAL:
+ if n.Ntype != nil {
+ typecheck(&n.Ntype, Etype)
+ n.Type = n.Ntype.Type
+ n.Ntype = nil
+ if n.Type == nil {
+ n.Diag = 1
+ goto ret
+ }
+ }
+
+ e = n.Defn
+ n.Defn = nil
+ if e == nil {
+ lineno = n.Lineno
+ Dump("typecheckdef nil defn", n)
+ Yyerror("xxx")
+ }
+
+ typecheck(&e, Erv|Eiota)
+ if Isconst(e, CTNIL) != 0 {
+ Yyerror("const initializer cannot be nil")
+ goto ret
+ }
+
+ if e.Type != nil && e.Op != OLITERAL || !(isgoconst(e) != 0) {
+ if !(e.Diag != 0) {
+ Yyerror("const initializer %v is not a constant", Nconv(e, 0))
+ e.Diag = 1
+ }
+
+ goto ret
+ }
+
+ t = n.Type
+ if t != nil {
+ if !(okforconst[t.Etype] != 0) {
+ Yyerror("invalid constant type %v", Tconv(t, 0))
+ goto ret
+ }
+
+ if !(isideal(e.Type) != 0) && !Eqtype(t, e.Type) {
+ Yyerror("cannot use %v as type %v in const initializer", Nconv(e, obj.FmtLong), Tconv(t, 0))
+ goto ret
+ }
+
+ Convlit(&e, t)
+ }
+
+ n.Val = e.Val
+ n.Type = e.Type
+
+ case ONAME:
+ if n.Ntype != nil {
+ typecheck(&n.Ntype, Etype)
+ n.Type = n.Ntype.Type
+
+ if n.Type == nil {
+ n.Diag = 1
+ goto ret
+ }
+ }
+
+ if n.Type != nil {
+ break
+ }
+ if n.Defn == nil {
+ if n.Etype != 0 { // like OPRINTN
+ break
+ }
+ if nsavederrors+nerrors > 0 {
+ // Can have undefined variables in x := foo
+ // that make x have an n->ndefn == nil.
+ // If there are other errors anyway, don't
+ // bother adding to the noise.
+ break
+ }
+
+ Fatal("var without type, init: %v", Sconv(n.Sym, 0))
+ }
+
+ if n.Defn.Op == ONAME {
+ typecheck(&n.Defn, Erv)
+ n.Type = n.Defn.Type
+ break
+ }
+
+ typecheck(&n.Defn, Etop) // fills in n->type
+
+ case OTYPE:
+ if Curfn != nil {
+ defercheckwidth()
+ }
+ n.Walkdef = 1
+ n.Type = typ(TFORW)
+ n.Type.Sym = n.Sym
+ nerrors0 = nerrors
+ typecheckdeftype(n)
+ if n.Type.Etype == TFORW && nerrors > nerrors0 {
+ // Something went wrong during type-checking,
+ // but it was reported. Silence future errors.
+ n.Type.Broke = 1
+ }
+
+ if Curfn != nil {
+ resumecheckwidth()
+ }
+
+ // nothing to see here
+ case OPACK:
+ break
+ }
+
+ret:
+ if n.Op != OLITERAL && n.Type != nil && isideal(n.Type) != 0 {
+ Fatal("got %v for %v", Tconv(n.Type, 0), Nconv(n, 0))
+ }
+ if typecheckdefstack.N != n {
+ Fatal("typecheckdefstack mismatch")
+ }
+ l = typecheckdefstack
+ typecheckdefstack = l.Next
+
+ lineno = int32(lno)
+ n.Walkdef = 1
+ return n
+}
+
+func checkmake(t *Type, arg string, n *Node) int {
+ if n.Op == OLITERAL {
+ switch n.Val.Ctype {
+ case CTINT,
+ CTRUNE,
+ CTFLT,
+ CTCPLX:
+ n.Val = toint(n.Val)
+ if mpcmpfixc(n.Val.U.Xval, 0) < 0 {
+ Yyerror("negative %s argument in make(%v)", arg, Tconv(t, 0))
+ return -1
+ }
+
+ if Mpcmpfixfix(n.Val.U.Xval, Maxintval[TINT]) > 0 {
+ Yyerror("%s argument too large in make(%v)", arg, Tconv(t, 0))
+ return -1
+ }
+
+ // Delay defaultlit until after we've checked range, to avoid
+ // a redundant "constant NNN overflows int" error.
+ defaultlit(&n, Types[TINT])
+
+ return 0
+
+ default:
+ break
+ }
+ }
+
+ if !(Isint[n.Type.Etype] != 0) && n.Type.Etype != TIDEAL {
+ Yyerror("non-integer %s argument in make(%v) - %v", arg, Tconv(t, 0), Tconv(n.Type, 0))
+ return -1
+ }
+
+ // Defaultlit still necessary for non-constant: n might be 1<<k.
+ defaultlit(&n, Types[TINT])
+
+ return 0
+}
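+
+// For example, make([]int, -1) is rejected here with a negative-argument
+// error and make([]int, 1<<63) with a too-large one, while a
+// non-constant size such as make([]int, 1<<k) only has defaultlit
+// applied and is range-checked at run time instead.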
+
+func markbreak(n *Node, implicit *Node) {
+ var lab *Label
+
+ if n == nil {
+ return
+ }
+
+ switch n.Op {
+ case OBREAK:
+ if n.Left == nil {
+ if implicit != nil {
+ implicit.Hasbreak = 1
+ }
+ } else {
+ lab = n.Left.Sym.Label
+ if lab != nil {
+ lab.Def.Hasbreak = 1
+ }
+ }
+
+ case OFOR,
+ OSWITCH,
+ OTYPESW,
+ OSELECT,
+ ORANGE:
+ implicit = n
+ fallthrough
+ default:
+ markbreak(n.Left, implicit)
+
+ markbreak(n.Right, implicit)
+ markbreak(n.Ntest, implicit)
+ markbreak(n.Nincr, implicit)
+ markbreaklist(n.Ninit, implicit)
+ markbreaklist(n.Nbody, implicit)
+ markbreaklist(n.Nelse, implicit)
+ markbreaklist(n.List, implicit)
+ markbreaklist(n.Rlist, implicit)
+ }
+}
+
+func markbreaklist(l *NodeList, implicit *Node) {
+ var n *Node
+ var lab *Label
+
+ for ; l != nil; l = l.Next {
+ n = l.N
+ if n.Op == OLABEL && l.Next != nil && n.Defn == l.Next.N {
+ switch n.Defn.Op {
+ case OFOR,
+ OSWITCH,
+ OTYPESW,
+ OSELECT,
+ ORANGE:
+ lab = new(Label)
+ lab.Def = n.Defn
+ n.Left.Sym.Label = lab
+ markbreak(n.Defn, n.Defn)
+ n.Left.Sym.Label = nil
+ l = l.Next
+ continue
+ }
+ }
+
+ markbreak(n, implicit)
+ }
+}
+
+func isterminating(l *NodeList, top int) int {
+ var def int
+ var n *Node
+
+ if l == nil {
+ return 0
+ }
+ if top != 0 {
+ for l.Next != nil && l.N.Op != OLABEL {
+ l = l.Next
+ }
+ markbreaklist(l, nil)
+ }
+
+ for l.Next != nil {
+ l = l.Next
+ }
+ n = l.N
+
+ if n == nil {
+ return 0
+ }
+
+ switch n.Op {
+ // NOTE: OLABEL is treated as a separate statement,
+ // not a separate prefix, so skipping to the last statement
+ // in the block handles the labeled statement case by
+ // skipping over the label. No case OLABEL here.
+
+ case OBLOCK:
+ return isterminating(n.List, 0)
+
+ case OGOTO,
+ ORETURN,
+ ORETJMP,
+ OPANIC,
+ OXFALL:
+ return 1
+
+ case OFOR:
+ if n.Ntest != nil {
+ return 0
+ }
+ if n.Hasbreak != 0 {
+ return 0
+ }
+ return 1
+
+ case OIF:
+ return bool2int(isterminating(n.Nbody, 0) != 0 && isterminating(n.Nelse, 0) != 0)
+
+ case OSWITCH,
+ OTYPESW,
+ OSELECT:
+ if n.Hasbreak != 0 {
+ return 0
+ }
+ def = 0
+ for l = n.List; l != nil; l = l.Next {
+ if !(isterminating(l.N.Nbody, 0) != 0) {
+ return 0
+ }
+ if l.N.List == nil { // default
+ def = 1
+ }
+ }
+
+ if n.Op != OSELECT && !(def != 0) {
+ return 0
+ }
+ return 1
+ }
+
+ return 0
+}
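+
+// For example, a function ending in "for {}" is terminating (no loop
+// condition and no break recorded by markbreak), so checkreturn accepts
+// it without a final return; one ending in "for { break }" is not,
+// since the break sets Hasbreak on the loop.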
+
+func checkreturn(fn *Node) {
+ if fn.Type.Outtuple != 0 && fn.Nbody != nil {
+ if !(isterminating(fn.Nbody, 1) != 0) {
+ yyerrorl(int(fn.Endlineno), "missing return at end of function")
+ }
+ }
+}
diff --git a/src/cmd/internal/gc/unsafe.go b/src/cmd/internal/gc/unsafe.go
new file mode 100644
index 0000000000..e50ea19837
--- /dev/null
+++ b/src/cmd/internal/gc/unsafe.go
@@ -0,0 +1,178 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+/*
+ * look for
+ * unsafe.Sizeof
+ * unsafe.Offsetof
+ * unsafe.Alignof
+ * rewrite with a constant
+ */
+func unsafenmagic(nn *Node) *Node {
+ var r *Node
+ var n *Node
+ var base *Node
+ var r1 *Node
+ var s *Sym
+ var t *Type
+ var tr *Type
+ var v int64
+ var val Val
+ var fn *Node
+ var args *NodeList
+
+ fn = nn.Left
+ args = nn.List
+
+ if safemode != 0 || fn == nil || fn.Op != ONAME {
+ goto no
+ }
+ s = fn.Sym
+ if s == nil {
+ goto no
+ }
+ if s.Pkg != unsafepkg {
+ goto no
+ }
+
+ if args == nil {
+ Yyerror("missing argument for %v", Sconv(s, 0))
+ goto no
+ }
+
+ r = args.N
+
+ if s.Name == "Sizeof" {
+ typecheck(&r, Erv)
+ defaultlit(&r, nil)
+ tr = r.Type
+ if tr == nil {
+ goto bad
+ }
+ dowidth(tr)
+ v = tr.Width
+ goto yes
+ }
+
+ if s.Name == "Offsetof" {
+ // must be a selector.
+ if r.Op != OXDOT {
+ goto bad
+ }
+
+ // Remember base of selector to find it back after dot insertion.
+ // Since r->left may be mutated by typechecking, check it explicitly
+ // first to track it correctly.
+ typecheck(&r.Left, Erv)
+
+ base = r.Left
+ typecheck(&r, Erv)
+ switch r.Op {
+ case ODOT,
+ ODOTPTR:
+ break
+
+ case OCALLPART:
+ Yyerror("invalid expression %v: argument is a method value", Nconv(nn, 0))
+ v = 0
+ goto ret
+
+ default:
+ goto bad
+ }
+
+ v = 0
+
+ // add offsets for inserted dots.
+ for r1 = r; r1.Left != base; r1 = r1.Left {
+ switch r1.Op {
+ case ODOT:
+ v += r1.Xoffset
+
+ case ODOTPTR:
+ Yyerror("invalid expression %v: selector implies indirection of embedded %v", Nconv(nn, 0), Nconv(r1.Left, 0))
+ goto ret
+
+ default:
+ Dump("unsafenmagic", r)
+ Fatal("impossible %v node after dot insertion", Oconv(int(r1.Op), obj.FmtSharp))
+ goto bad
+ }
+ }
+
+ v += r1.Xoffset
+ goto yes
+ }
+
+ if s.Name == "Alignof" {
+ typecheck(&r, Erv)
+ defaultlit(&r, nil)
+ tr = r.Type
+ if tr == nil {
+ goto bad
+ }
+
+ // make struct { byte; T; }
+ t = typ(TSTRUCT)
+
+ t.Type = typ(TFIELD)
+ t.Type.Type = Types[TUINT8]
+ t.Type.Down = typ(TFIELD)
+ t.Type.Down.Type = tr
+
+ // compute struct widths
+ dowidth(t)
+
+ // the offset of T is its required alignment
+ v = t.Type.Down.Width
+
+ goto yes
+ }
+
+no:
+ return nil
+
+bad:
+ Yyerror("invalid expression %v", Nconv(nn, 0))
+ v = 0
+ goto ret
+
+yes:
+ if args.Next != nil {
+ Yyerror("extra arguments for %v", Sconv(s, 0))
+ }
+
+ // any side effects disappear; ignore init
+ret:
+ val.Ctype = CTINT
+
+ val.U.Xval = new(Mpint)
+ Mpmovecfix(val.U.Xval, v)
+ n = Nod(OLITERAL, nil, nil)
+ n.Orig = nn
+ n.Val = val
+ n.Type = Types[TUINTPTR]
+ nn.Type = Types[TUINTPTR]
+ return n
+}
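+
+// For example, unsafe.Sizeof(int64(0)) becomes the constant 8;
+// unsafe.Offsetof(s.inner.f) adds up the Xoffsets of the dots inserted
+// for the embedded path; and for unsafe.Alignof, the offset that
+// dowidth assigns to T inside the synthesized struct { byte; T; } is
+// exactly T's required alignment.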
+
+func isunsafebuiltin(n *Node) int {
+ if n == nil || n.Op != ONAME || n.Sym == nil || n.Sym.Pkg != unsafepkg {
+ return 0
+ }
+ if n.Sym.Name == "Sizeof" {
+ return 1
+ }
+ if n.Sym.Name == "Offsetof" {
+ return 1
+ }
+ if n.Sym.Name == "Alignof" {
+ return 1
+ }
+ return 0
+}
diff --git a/src/cmd/internal/gc/util.go b/src/cmd/internal/gc/util.go
new file mode 100644
index 0000000000..ceb3eead35
--- /dev/null
+++ b/src/cmd/internal/gc/util.go
@@ -0,0 +1,70 @@
+package gc
+
+import (
+ "cmd/internal/obj"
+ "strconv"
+ "strings"
+)
+
+func bool2int(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+func (n *Node) Line() string {
+ return obj.Linklinefmt(Ctxt, int(n.Lineno), false, false)
+}
+
+func atoi(s string) int {
+ // NOTE: Not strconv.Atoi; base 0 also accepts hex and octal prefixes.
+ n, _ := strconv.ParseInt(s, 0, 0)
+ return int(n)
+}
+
+func isalnum(c int) bool {
+ return isalpha(c) || isdigit(c)
+}
+
+func isalpha(c int) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
+}
+
+func isdigit(c int) bool {
+ return '0' <= c && c <= '9'
+}
+
+func plan9quote(s string) string {
+ if s == "" {
+ goto needquote
+ }
+ for i := 0; i < len(s); i++ {
+ if s[i] <= ' ' || s[i] == '\'' {
+ goto needquote
+ }
+ }
+ return s
+
+needquote:
+ return "'" + strings.Replace(s, "'", "''", -1) + "'"
+}
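+
+// For example, plan9quote("abc") returns abc unchanged, while
+// plan9quote("a b") returns 'a b' and plan9quote("it's") returns
+// 'it''s', with the embedded quote doubled.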
+
+// simulation of int(*s++) in C
+func intstarstringplusplus(s string) (int, string) {
+ if s == "" {
+ return 0, ""
+ }
+ return int(s[0]), s[1:]
+}
+
+// stringsCompare is a local copy of strings.Compare, which was only introduced in Go 1.5.
+func stringsCompare(a, b string) int {
+ if a == b {
+ return 0
+ }
+ if a < b {
+ return -1
+ }
+ return +1
+}
diff --git a/src/cmd/internal/gc/walk.go b/src/cmd/internal/gc/walk.go
new file mode 100644
index 0000000000..37299ca123
--- /dev/null
+++ b/src/cmd/internal/gc/walk.go
@@ -0,0 +1,4531 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+)
+
+var mpzero Mpint
+
+// The constant is known to runtime.
+const (
+ tmpstringbufsize = 32
+)
+
+func walk(fn *Node) {
+ var s string
+ var l *NodeList
+ var lno int
+
+ Curfn = fn
+
+ if Debug['W'] != 0 {
+ s = fmt.Sprintf("\nbefore %v", Sconv(Curfn.Nname.Sym, 0))
+ dumplist(s, Curfn.Nbody)
+ }
+
+ lno = int(lineno)
+
+ // Final typecheck for any unused variables.
+ // It's hard to be on the heap when not-used, but best to be consistent about &~PHEAP here and below.
+ for l = fn.Dcl; l != nil; l = l.Next {
+ if l.N.Op == ONAME && l.N.Class&^PHEAP == PAUTO {
+ typecheck(&l.N, Erv|Easgn)
+ }
+ }
+
+ // Propagate the used flag for typeswitch variables up to the NONAME in its definition.
+ for l = fn.Dcl; l != nil; l = l.Next {
+ if l.N.Op == ONAME && l.N.Class&^PHEAP == PAUTO && l.N.Defn != nil && l.N.Defn.Op == OTYPESW && l.N.Used != 0 {
+ l.N.Defn.Left.Used++
+ }
+ }
+
+ for l = fn.Dcl; l != nil; l = l.Next {
+ if l.N.Op != ONAME || l.N.Class&^PHEAP != PAUTO || l.N.Sym.Name[0] == '&' || l.N.Used != 0 {
+ continue
+ }
+ if l.N.Defn != nil && l.N.Defn.Op == OTYPESW {
+ if l.N.Defn.Left.Used != 0 {
+ continue
+ }
+ lineno = l.N.Defn.Left.Lineno
+ Yyerror("%v declared and not used", Sconv(l.N.Sym, 0))
+ l.N.Defn.Left.Used = 1 // suppress repeats
+ } else {
+ lineno = l.N.Lineno
+ Yyerror("%v declared and not used", Sconv(l.N.Sym, 0))
+ }
+ }
+
+ lineno = int32(lno)
+ if nerrors != 0 {
+ return
+ }
+ walkstmtlist(Curfn.Nbody)
+ if Debug['W'] != 0 {
+ s = fmt.Sprintf("after walk %v", Sconv(Curfn.Nname.Sym, 0))
+ dumplist(s, Curfn.Nbody)
+ }
+
+ heapmoves()
+ if Debug['W'] != 0 && Curfn.Enter != nil {
+ s = fmt.Sprintf("enter %v", Sconv(Curfn.Nname.Sym, 0))
+ dumplist(s, Curfn.Enter)
+ }
+}
+
+func walkstmtlist(l *NodeList) {
+ for ; l != nil; l = l.Next {
+ walkstmt(&l.N)
+ }
+}
+
+func samelist(a *NodeList, b *NodeList) int {
+ for ; a != nil && b != nil; a, b = a.Next, b.Next {
+ if a.N != b.N {
+ return 0
+ }
+ }
+ return bool2int(a == b)
+}
+
+func paramoutheap(fn *Node) int {
+ var l *NodeList
+
+ for l = fn.Dcl; l != nil; l = l.Next {
+ switch l.N.Class {
+ case PPARAMOUT,
+ PPARAMOUT | PHEAP:
+ return int(l.N.Addrtaken)
+
+ // stop early - parameters are over
+ case PAUTO,
+ PAUTO | PHEAP:
+ return 0
+ }
+ }
+
+ return 0
+}
+
+// adds "adjust" to all the argument locations for the call n.
+// n must be a defer or go node that has already been walked.
+func adjustargs(n *Node, adjust int) {
+ var callfunc *Node
+ var arg *Node
+ var lhs *Node
+ var args *NodeList
+
+ callfunc = n.Left
+ for args = callfunc.List; args != nil; args = args.Next {
+ arg = args.N
+ if arg.Op != OAS {
+ Yyerror("call arg not assignment")
+ }
+ lhs = arg.Left
+ if lhs.Op == ONAME {
+ // This is a temporary introduced by reorder1.
+ // The real store to the stack appears later in the arg list.
+ continue
+ }
+
+ if lhs.Op != OINDREG {
+ Yyerror("call argument store does not use OINDREG")
+ }
+
+ // can't really check this in machine-indep code.
+ //if(lhs->val.u.reg != D_SP)
+ // yyerror("call arg assign not indreg(SP)");
+ lhs.Xoffset += int64(adjust)
+ }
+}
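+
+// For example, for "defer f(x)" the walked call stores its arguments
+// through OINDREG(SP) nodes; adjustargs shifts each store by 2*Widthptr
+// so that the frame handed to the runtime leaves room for the size and
+// function-value words that precede the real arguments.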
+
+func walkstmt(np **Node) {
+ var init *NodeList
+ var ll *NodeList
+ var rl *NodeList
+ var cl int
+ var n *Node
+ var f *Node
+
+ n = *np
+ if n == nil {
+ return
+ }
+ if n.Dodata == 2 { // don't walk, generated by anylit.
+ return
+ }
+
+ setlineno(n)
+
+ walkstmtlist(n.Ninit)
+
+ switch n.Op {
+ default:
+ if n.Op == ONAME {
+ Yyerror("%v is not a top level statement", Sconv(n.Sym, 0))
+ } else {
+ Yyerror("%v is not a top level statement", Oconv(int(n.Op), 0))
+ }
+ Dump("nottop", n)
+
+ case OAS,
+ OASOP,
+ OAS2,
+ OAS2DOTTYPE,
+ OAS2RECV,
+ OAS2FUNC,
+ OAS2MAPR,
+ OCLOSE,
+ OCOPY,
+ OCALLMETH,
+ OCALLINTER,
+ OCALL,
+ OCALLFUNC,
+ ODELETE,
+ OSEND,
+ OPRINT,
+ OPRINTN,
+ OPANIC,
+ OEMPTY,
+ ORECOVER:
+ if n.Typecheck == 0 {
+ Fatal("missing typecheck: %v", Nconv(n, obj.FmtSign))
+ }
+ init = n.Ninit
+ n.Ninit = nil
+ walkexpr(&n, &init)
+ addinit(&n, init)
+ if (*np).Op == OCOPY && n.Op == OCONVNOP {
+ n.Op = OEMPTY // don't leave plain values as statements.
+ }
+
+ // special case for a receive where we throw away
+ // the value received.
+ case ORECV:
+ if n.Typecheck == 0 {
+ Fatal("missing typecheck: %v", Nconv(n, obj.FmtSign))
+ }
+ init = n.Ninit
+ n.Ninit = nil
+
+ walkexpr(&n.Left, &init)
+ n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, typename(n.Left.Type), n.Left, nodnil())
+ walkexpr(&n, &init)
+
+ addinit(&n, init)
+
+ case OBREAK,
+ ODCL,
+ OCONTINUE,
+ OFALL,
+ OGOTO,
+ OLABEL,
+ ODCLCONST,
+ ODCLTYPE,
+ OCHECKNIL,
+ OVARKILL:
+ break
+
+ case OBLOCK:
+ walkstmtlist(n.List)
+
+ case OXCASE:
+ Yyerror("case statement out of place")
+ n.Op = OCASE
+ fallthrough
+
+ case OCASE:
+ walkstmt(&n.Right)
+
+ case ODEFER:
+ Hasdefer = 1
+ switch n.Left.Op {
+ case OPRINT,
+ OPRINTN:
+ walkprintfunc(&n.Left, &n.Ninit)
+
+ case OCOPY:
+ n.Left = copyany(n.Left, &n.Ninit, 1)
+
+ default:
+ walkexpr(&n.Left, &n.Ninit)
+ }
+
+ // make room for size & fn arguments.
+ adjustargs(n, 2*Widthptr)
+
+ case OFOR:
+ if n.Ntest != nil {
+ walkstmtlist(n.Ntest.Ninit)
+ init = n.Ntest.Ninit
+ n.Ntest.Ninit = nil
+ walkexpr(&n.Ntest, &init)
+ addinit(&n.Ntest, init)
+ }
+
+ walkstmt(&n.Nincr)
+ walkstmtlist(n.Nbody)
+
+ case OIF:
+ walkexpr(&n.Ntest, &n.Ninit)
+ walkstmtlist(n.Nbody)
+ walkstmtlist(n.Nelse)
+
+ case OPROC:
+ switch n.Left.Op {
+ case OPRINT,
+ OPRINTN:
+ walkprintfunc(&n.Left, &n.Ninit)
+
+ case OCOPY:
+ n.Left = copyany(n.Left, &n.Ninit, 1)
+
+ default:
+ walkexpr(&n.Left, &n.Ninit)
+ }
+
+ // make room for size & fn arguments.
+ adjustargs(n, 2*Widthptr)
+
+ case ORETURN:
+ walkexprlist(n.List, &n.Ninit)
+ if n.List == nil {
+ break
+ }
+ if (Curfn.Type.Outnamed != 0 && count(n.List) > 1) || paramoutheap(Curfn) != 0 {
+ // assign to the function out parameters,
+ // so that reorder3 can fix up conflicts
+ rl = nil
+
+ for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ cl = int(ll.N.Class) &^ PHEAP
+ if cl == PAUTO {
+ break
+ }
+ if cl == PPARAMOUT {
+ rl = list(rl, ll.N)
+ }
+ }
+
+ if samelist(rl, n.List) != 0 {
+ // special return in disguise
+ n.List = nil
+
+ break
+ }
+
+ if count(n.List) == 1 && count(rl) > 1 {
+ // OAS2FUNC in disguise
+ f = n.List.N
+
+ if f.Op != OCALLFUNC && f.Op != OCALLMETH && f.Op != OCALLINTER {
+ Fatal("expected return of call, have %v", Nconv(f, 0))
+ }
+ n.List = concat(list1(f), ascompatet(int(n.Op), rl, &f.Type, 0, &n.Ninit))
+ break
+ }
+
+ // move function calls out, to make reorder3's job easier.
+ walkexprlistsafe(n.List, &n.Ninit)
+
+ ll = ascompatee(int(n.Op), rl, n.List, &n.Ninit)
+ n.List = reorder3(ll)
+ break
+ }
+
+ ll = ascompatte(int(n.Op), nil, 0, Getoutarg(Curfn.Type), n.List, 1, &n.Ninit)
+ n.List = ll
+
+ case ORETJMP:
+ break
+
+ case OSELECT:
+ walkselect(n)
+
+ case OSWITCH:
+ walkswitch(n)
+
+ case ORANGE:
+ walkrange(n)
+
+ case OXFALL:
+ Yyerror("fallthrough statement out of place")
+ n.Op = OFALL
+ }
+
+ if n.Op == ONAME {
+ Fatal("walkstmt ended up with name: %v", Nconv(n, obj.FmtSign))
+ }
+
+ *np = n
+}
+
+/*
+ * walk the whole tree of the body of an
+ * expression or simple statement.
+ * the types of expressions are calculated.
+ * compile-time constants are evaluated.
+ * complex side effects like statements are appended to init
+ */
+func walkexprlist(l *NodeList, init **NodeList) {
+ for ; l != nil; l = l.Next {
+ walkexpr(&l.N, init)
+ }
+}
+
+func walkexprlistsafe(l *NodeList, init **NodeList) {
+ for ; l != nil; l = l.Next {
+ l.N = safeexpr(l.N, init)
+ walkexpr(&l.N, init)
+ }
+}
+
+func walkexprlistcheap(l *NodeList, init **NodeList) {
+ for ; l != nil; l = l.Next {
+ l.N = cheapexpr(l.N, init)
+ walkexpr(&l.N, init)
+ }
+}
+
+func walkexpr(np **Node, init **NodeList) {
+ var r *Node
+ var l *Node
+ var var_ *Node
+ var a *Node
+ var ok *Node
+ var map_ *Node
+ var key *Node
+ var ll *NodeList
+ var lr *NodeList
+ var t *Type
+ var et int
+ var old_safemode int
+ var v int64
+ var lno int32
+ var n *Node
+ var fn *Node
+ var n1 *Node
+ var n2 *Node
+ var sym *Sym
+ var buf string
+ var p string
+ var from string
+ var to string
+
+ n = *np
+
+ if n == nil {
+ return
+ }
+
+ if init == &n.Ninit {
+ // not okay to use n->ninit when walking n,
+ // because we might replace n with some other node
+ // and would lose the init list.
+ Fatal("walkexpr init == &n->ninit")
+ }
+
+ if n.Ninit != nil {
+ walkstmtlist(n.Ninit)
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+ }
+
+ // annoying case - not typechecked
+ if n.Op == OKEY {
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ return
+ }
+
+ lno = setlineno(n)
+
+ if Debug['w'] > 1 {
+ Dump("walk-before", n)
+ }
+
+ if n.Typecheck != 1 {
+ Fatal("missed typecheck: %v\n", Nconv(n, obj.FmtSign))
+ }
+
+ switch n.Op {
+ default:
+ Dump("walk", n)
+ Fatal("walkexpr: switch 1 unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ case OTYPE,
+ ONONAME,
+ OINDREG,
+ OEMPTY,
+ OPARAM:
+ goto ret
+
+ case ONOT,
+ OMINUS,
+ OPLUS,
+ OCOM,
+ OREAL,
+ OIMAG,
+ ODOTMETH,
+ ODOTINTER:
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case OIND:
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case ODOT:
+ usefield(n)
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case ODOTPTR:
+ usefield(n)
+ if n.Op == ODOTPTR && n.Left.Type.Type.Width == 0 {
+ // No actual copy will be generated, so emit an explicit nil check.
+ n.Left = cheapexpr(n.Left, init)
+
+ checknil(n.Left, init)
+ }
+
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case OEFACE:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ goto ret
+
+ case OSPTR,
+ OITAB:
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case OLEN,
+ OCAP:
+ walkexpr(&n.Left, init)
+
+ // replace len(*[10]int) with 10.
+ // delayed until now to preserve side effects.
+ t = n.Left.Type
+
+ if Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+ if Isfixedarray(t) != 0 {
+ safeexpr(n.Left, init)
+ Nodconst(n, n.Type, t.Bound)
+ n.Typecheck = 1
+ }
+
+ goto ret
+
+ case OLSH,
+ ORSH:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ t = n.Left.Type
+ n.Bounded = uint8(bounded(n.Right, 8*t.Width))
+ if Debug['m'] != 0 && n.Etype != 0 && !(Isconst(n.Right, CTINT) != 0) {
+ Warn("shift bounds check elided")
+ }
+ goto ret
+
+ // Use results from call expression as arguments for complex.
+ case OAND,
+ OSUB,
+ OHMUL,
+ OLT,
+ OLE,
+ OGE,
+ OGT,
+ OADD,
+ OCOMPLEX,
+ OLROT:
+ if n.Op == OCOMPLEX && n.Left == nil && n.Right == nil {
+ n.Left = n.List.N
+ n.Right = n.List.Next.N
+ }
+
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ goto ret
+
+ case OOR,
+ OXOR:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ walkrotate(&n)
+ goto ret
+
+ case OEQ,
+ ONE:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+
+ // Disable safemode while compiling this code: the code we
+ // generate internally can refer to unsafe.Pointer.
+ // In this case it can happen if we need to generate an ==
+ // for a struct containing a reflect.Value, which itself has
+ // an unexported field of type unsafe.Pointer.
+ old_safemode = safemode
+
+ safemode = 0
+ walkcompare(&n, init)
+ safemode = old_safemode
+ goto ret
+
+ case OANDAND,
+ OOROR:
+ walkexpr(&n.Left, init)
+
+ // cannot put side effects from n->right on init,
+ // because they cannot run before n->left is checked.
+ // save elsewhere and store on the eventual n->right.
+ ll = nil
+
+ walkexpr(&n.Right, &ll)
+ addinit(&n.Right, ll)
+ goto ret
+
+ case OPRINT,
+ OPRINTN:
+ walkexprlist(n.List, init)
+ n = walkprint(n, init)
+ goto ret
+
+ case OPANIC:
+ n = mkcall("gopanic", nil, init, n.Left)
+ goto ret
+
+ case ORECOVER:
+ n = mkcall("gorecover", n.Type, init, Nod(OADDR, nodfp, nil))
+ goto ret
+
+ case OLITERAL:
+ n.Addable = 1
+ goto ret
+
+ case OCLOSUREVAR,
+ OCFUNC:
+ n.Addable = 1
+ goto ret
+
+ case ONAME:
+ if !(n.Class&PHEAP != 0) && n.Class != PPARAMREF {
+ n.Addable = 1
+ }
+ goto ret
+
+ case OCALLINTER:
+ t = n.Left.Type
+ if n.List != nil && n.List.N.Op == OAS {
+ goto ret
+ }
+ walkexpr(&n.Left, init)
+ walkexprlist(n.List, init)
+ ll = ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
+ n.List = reorder1(ll)
+ goto ret
+
+ case OCALLFUNC:
+ if n.Left.Op == OCLOSURE {
+ // Transform direct call of a closure to call of a normal function.
+ // transformclosure already did all preparation work.
+
+ // Append captured variables to argument list.
+ n.List = concat(n.List, n.Left.Enter)
+
+ n.Left.Enter = nil
+
+ // Replace OCLOSURE with ONAME/PFUNC.
+ n.Left = n.Left.Closure.Nname
+
+ // Update type of OCALLFUNC node.
+ // Output arguments had not changed, but their offsets could.
+ if n.Left.Type.Outtuple == 1 {
+ t = getoutargx(n.Left.Type).Type
+ if t.Etype == TFIELD {
+ t = t.Type
+ }
+ n.Type = t
+ } else {
+ n.Type = getoutargx(n.Left.Type)
+ }
+ }
+
+ t = n.Left.Type
+ if n.List != nil && n.List.N.Op == OAS {
+ goto ret
+ }
+
+ walkexpr(&n.Left, init)
+ walkexprlist(n.List, init)
+
+ ll = ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
+ n.List = reorder1(ll)
+ goto ret
+
+ case OCALLMETH:
+ t = n.Left.Type
+ if n.List != nil && n.List.N.Op == OAS {
+ goto ret
+ }
+ walkexpr(&n.Left, init)
+ walkexprlist(n.List, init)
+ ll = ascompatte(int(n.Op), n, 0, getthis(t), list1(n.Left.Left), 0, init)
+ lr = ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
+ ll = concat(ll, lr)
+ n.Left.Left = nil
+ ullmancalc(n.Left)
+ n.List = reorder1(ll)
+ goto ret
+
+ case OAS:
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+
+ walkexpr(&n.Left, init)
+ n.Left = safeexpr(n.Left, init)
+
+ if oaslit(n, init) != 0 {
+ goto ret
+ }
+
+ if n.Right == nil || iszero(n.Right) != 0 && !(flag_race != 0) {
+ goto ret
+ }
+
+ switch n.Right.Op {
+ default:
+ walkexpr(&n.Right, init)
+
+ // x = i.(T); n->left is x, n->right->left is i.
+ // orderstmt made sure x is addressable.
+ case ODOTTYPE:
+ walkexpr(&n.Right.Left, init)
+
+ n1 = Nod(OADDR, n.Left, nil)
+ r = n.Right // i.(T)
+
+ from = "I"
+
+ to = "T"
+ if isnilinter(r.Left.Type) != 0 {
+ from = "E"
+ }
+ if isnilinter(r.Type) != 0 {
+ to = "E"
+ } else if Isinter(r.Type) != 0 {
+ to = "I"
+ }
+
+ buf = fmt.Sprintf("assert%s2%s", from, to)
+
+ fn = syslook(buf, 1)
+ argtype(fn, r.Left.Type)
+ argtype(fn, r.Type)
+
+ n = mkcall1(fn, nil, init, typename(r.Type), r.Left, n1)
+ walkexpr(&n, init)
+ goto ret
+
+ // x = <-c; n->left is x, n->right->left is c.
+ // orderstmt made sure x is addressable.
+ case ORECV:
+ walkexpr(&n.Right.Left, init)
+
+ n1 = Nod(OADDR, n.Left, nil)
+ r = n.Right.Left // the channel
+ n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, typename(r.Type), r, n1)
+ walkexpr(&n, init)
+ goto ret
+ }
+
+ if n.Left != nil && n.Right != nil {
+ r = convas(Nod(OAS, n.Left, n.Right), init)
+ r.Dodata = n.Dodata
+ n = r
+ n = applywritebarrier(n, init)
+ }
+
+ goto ret
+
+ case OAS2:
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+ walkexprlistsafe(n.List, init)
+ walkexprlistsafe(n.Rlist, init)
+ ll = ascompatee(OAS, n.List, n.Rlist, init)
+ ll = reorder3(ll)
+ for lr = ll; lr != nil; lr = lr.Next {
+ lr.N = applywritebarrier(lr.N, init)
+ }
+ n = liststmt(ll)
+ goto ret
+
+ // a,b,... = fn()
+ case OAS2FUNC:
+ *init = concat(*init, n.Ninit)
+
+ n.Ninit = nil
+ r = n.Rlist.N
+ walkexprlistsafe(n.List, init)
+ walkexpr(&r, init)
+
+ ll = ascompatet(int(n.Op), n.List, &r.Type, 0, init)
+ for lr = ll; lr != nil; lr = lr.Next {
+ lr.N = applywritebarrier(lr.N, init)
+ }
+ n = liststmt(concat(list1(r), ll))
+ goto ret
+
+ // x, y = <-c
+ // orderstmt made sure x is addressable.
+ case OAS2RECV:
+ *init = concat(*init, n.Ninit)
+
+ n.Ninit = nil
+ r = n.Rlist.N
+ walkexprlistsafe(n.List, init)
+ walkexpr(&r.Left, init)
+ if isblank(n.List.N) {
+ n1 = nodnil()
+ } else {
+ n1 = Nod(OADDR, n.List.N, nil)
+ }
+ n1.Etype = 1 // addr does not escape
+ fn = chanfn("chanrecv2", 2, r.Left.Type)
+ r = mkcall1(fn, n.List.Next.N.Type, init, typename(r.Left.Type), r.Left, n1)
+ n = Nod(OAS, n.List.Next.N, r)
+ typecheck(&n, Etop)
+ goto ret
+
+ // a,b = m[i];
+ case OAS2MAPR:
+ *init = concat(*init, n.Ninit)
+
+ n.Ninit = nil
+ r = n.Rlist.N
+ walkexprlistsafe(n.List, init)
+ walkexpr(&r.Left, init)
+ walkexpr(&r.Right, init)
+ t = r.Left.Type
+ p = ""
+ if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
+ switch Simsimtype(t.Down) {
+ case TINT32,
+ TUINT32:
+ p = "mapaccess2_fast32"
+
+ case TINT64,
+ TUINT64:
+ p = "mapaccess2_fast64"
+
+ case TSTRING:
+ p = "mapaccess2_faststr"
+ }
+ }
+
+ if p != "" {
+ // fast versions take key by value
+ key = r.Right
+ } else {
+ // standard version takes key by reference
+ // orderexpr made sure key is addressable.
+ key = Nod(OADDR, r.Right, nil)
+
+ p = "mapaccess2"
+ }
+
+ // from:
+ // a,b = m[i]
+ // to:
+ // var,b = mapaccess2*(t, m, i)
+ // a = *var
+ a = n.List.N
+
+ fn = mapfn(p, t)
+ r = mkcall1(fn, getoutargx(fn.Type), init, typename(t), r.Left, key)
+
+ // mapaccess2* returns a typed bool, but due to spec changes,
+ // the boolean result of i.(T) is now untyped so we make it the
+ // same type as the variable on the lhs.
+ if !isblank(n.List.Next.N) {
+ r.Type.Type.Down.Type = n.List.Next.N.Type
+ }
+ n.Rlist = list1(r)
+ n.Op = OAS2FUNC
+
+ // don't generate a = *var if a is _
+ if !isblank(a) {
+ var_ = temp(Ptrto(t.Type))
+ var_.Typecheck = 1
+ n.List.N = var_
+ walkexpr(&n, init)
+ *init = list(*init, n)
+ n = Nod(OAS, a, Nod(OIND, var_, nil))
+ }
+
+ typecheck(&n, Etop)
+ walkexpr(&n, init)
+
+ // mapaccess needs a zero value to be at least this big.
+ if zerosize < t.Type.Width {
+ zerosize = t.Type.Width
+ }
+
+ // TODO: ptr is always non-nil, so disable nil check for this OIND op.
+ goto ret
+
+ case ODELETE:
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+ map_ = n.List.N
+ key = n.List.Next.N
+ walkexpr(&map_, init)
+ walkexpr(&key, init)
+
+ // orderstmt made sure key is addressable.
+ key = Nod(OADDR, key, nil)
+
+ t = map_.Type
+ n = mkcall1(mapfndel("mapdelete", t), nil, init, typename(t), map_, key)
+ goto ret
+
+ // a,b = i.(T)
+ // orderstmt made sure a is addressable.
+ case OAS2DOTTYPE:
+ *init = concat(*init, n.Ninit)
+
+ n.Ninit = nil
+ r = n.Rlist.N
+ walkexprlistsafe(n.List, init)
+ walkexpr(&r.Left, init)
+ if isblank(n.List.N) {
+ n1 = nodnil()
+ } else {
+ n1 = Nod(OADDR, n.List.N, nil)
+ }
+ n1.Etype = 1 // addr does not escape
+
+ from = "I"
+
+ to = "T"
+ if isnilinter(r.Left.Type) != 0 {
+ from = "E"
+ }
+ if isnilinter(r.Type) != 0 {
+ to = "E"
+ } else if Isinter(r.Type) != 0 {
+ to = "I"
+ }
+ buf = fmt.Sprintf("assert%s2%s2", from, to)
+
+ fn = syslook(buf, 1)
+ argtype(fn, r.Left.Type)
+ argtype(fn, r.Type)
+
+ t = Types[TBOOL]
+ ok = n.List.Next.N
+ if !isblank(ok) {
+ t = ok.Type
+ }
+ r = mkcall1(fn, t, init, typename(r.Type), r.Left, n1)
+ n = Nod(OAS, ok, r)
+ typecheck(&n, Etop)
+ goto ret
+
+ case ODOTTYPE,
+ ODOTTYPE2:
+ Fatal("walkexpr ODOTTYPE") // should see inside OAS or OAS2 only
+ fallthrough
+
+ case OCONVIFACE:
+ walkexpr(&n.Left, init)
+
+ // Optimize convT2E as a two-word copy when T is pointer-shaped.
+ if isnilinter(n.Type) != 0 && isdirectiface(n.Left.Type) != 0 {
+ l = Nod(OEFACE, typename(n.Left.Type), n.Left)
+ l.Type = n.Type
+ l.Typecheck = n.Typecheck
+ n = l
+ goto ret
+ }
+
+ // Build name of function: convI2E etc.
+ // Not all names are possible
+ // (e.g., we'll never generate convE2E or convE2I).
+ from = "T"
+
+ to = "I"
+ if isnilinter(n.Left.Type) != 0 {
+ from = "E"
+ } else if Isinter(n.Left.Type) != 0 {
+ from = "I"
+ }
+ if isnilinter(n.Type) != 0 {
+ to = "E"
+ }
+ buf = fmt.Sprintf("conv%s2%s", from, to)
+
+ fn = syslook(buf, 1)
+ ll = nil
+ if !(Isinter(n.Left.Type) != 0) {
+ ll = list(ll, typename(n.Left.Type))
+ }
+ if !(isnilinter(n.Type) != 0) {
+ ll = list(ll, typename(n.Type))
+ }
+ if !(Isinter(n.Left.Type) != 0) && !(isnilinter(n.Type) != 0) {
+ sym = Pkglookup(fmt.Sprintf("%v.%v", Tconv(n.Left.Type, obj.FmtLeft), Tconv(n.Type, obj.FmtLeft)), itabpkg)
+ if sym.Def == nil {
+ l = Nod(ONAME, nil, nil)
+ l.Sym = sym
+ l.Type = Ptrto(Types[TUINT8])
+ l.Addable = 1
+ l.Class = PEXTERN
+ l.Xoffset = 0
+ sym.Def = l
+ ggloblsym(sym, int32(Widthptr), obj.DUPOK|obj.NOPTR)
+ }
+
+ l = Nod(OADDR, sym.Def, nil)
+ l.Addable = 1
+ ll = list(ll, l)
+
+ if isdirectiface(n.Left.Type) != 0 {
+ /* For pointer types, we can make a special form of optimization
+ *
+ * These statements are put onto the expression init list:
+ * Itab *tab = atomicloadtype(&cache);
+ * if(tab == nil)
+ * tab = typ2Itab(type, itype, &cache);
+ *
+ * The CONVIFACE expression is replaced with this:
+ * OEFACE{tab, ptr};
+ */
+ l = temp(Ptrto(Types[TUINT8]))
+
+ n1 = Nod(OAS, l, sym.Def)
+ typecheck(&n1, Etop)
+ *init = list(*init, n1)
+
+ fn = syslook("typ2Itab", 1)
+ n1 = Nod(OCALL, fn, nil)
+ n1.List = ll
+ typecheck(&n1, Erv)
+ walkexpr(&n1, init)
+
+ n2 = Nod(OIF, nil, nil)
+ n2.Ntest = Nod(OEQ, l, nodnil())
+ n2.Nbody = list1(Nod(OAS, l, n1))
+ n2.Likely = -1
+ typecheck(&n2, Etop)
+ *init = list(*init, n2)
+
+ l = Nod(OEFACE, l, n.Left)
+ l.Typecheck = n.Typecheck
+ l.Type = n.Type
+ n = l
+ goto ret
+ }
+ }
+
+ if Isinter(n.Left.Type) != 0 {
+ ll = list(ll, n.Left)
+ } else {
+ // regular types are passed by reference to avoid C vararg calls
+ // orderexpr arranged for n->left to be a temporary for all
+ // the conversions it could see. comparison of an interface
+ // with a non-interface, especially in a switch on interface value
+ // with non-interface cases, is not visible to orderstmt, so we
+ // have to fall back on allocating a temp here.
+ if islvalue(n.Left) != 0 {
+ ll = list(ll, Nod(OADDR, n.Left, nil))
+ } else {
+ ll = list(ll, Nod(OADDR, copyexpr(n.Left, n.Left.Type, init), nil))
+ }
+ }
+
+ argtype(fn, n.Left.Type)
+ argtype(fn, n.Type)
+ dowidth(fn.Type)
+ n = Nod(OCALL, fn, nil)
+ n.List = ll
+ typecheck(&n, Erv)
+ walkexpr(&n, init)
+ goto ret
+
+ case OCONV,
+ OCONVNOP:
+ if Thearch.Thechar == '5' {
+ if Isfloat[n.Left.Type.Etype] != 0 {
+ if n.Type.Etype == TINT64 {
+ n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ goto ret
+ }
+
+ if n.Type.Etype == TUINT64 {
+ n = mkcall("float64touint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ goto ret
+ }
+ }
+
+ if Isfloat[n.Type.Etype] != 0 {
+ if n.Left.Type.Etype == TINT64 {
+ n = mkcall("int64tofloat64", n.Type, init, conv(n.Left, Types[TINT64]))
+ goto ret
+ }
+
+ if n.Left.Type.Etype == TUINT64 {
+ n = mkcall("uint64tofloat64", n.Type, init, conv(n.Left, Types[TUINT64]))
+ goto ret
+ }
+ }
+ }
+
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case OANDNOT:
+ walkexpr(&n.Left, init)
+ n.Op = OAND
+ n.Right = Nod(OCOM, n.Right, nil)
+ typecheck(&n.Right, Erv)
+ walkexpr(&n.Right, init)
+ goto ret
+
+ case OMUL:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ walkmul(&n, init)
+ goto ret
+
+ case ODIV,
+ OMOD:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+
+ /*
+ * rewrite complex div into function call.
+ */
+ et = int(n.Left.Type.Etype)
+
+ if Iscomplex[et] != 0 && n.Op == ODIV {
+ t = n.Type
+ n = mkcall("complex128div", Types[TCOMPLEX128], init, conv(n.Left, Types[TCOMPLEX128]), conv(n.Right, Types[TCOMPLEX128]))
+ n = conv(n, t)
+ goto ret
+ }
+
+ // Nothing to do for float divisions.
+ if Isfloat[et] != 0 {
+ goto ret
+ }
+
+ // Try rewriting as shifts or magic multiplies.
+ walkdiv(&n, init)
+
+ /*
+ * rewrite 64-bit div and mod into function calls
+ * on 32-bit architectures.
+ */
+ switch n.Op {
+ case OMOD,
+ ODIV:
+ if Widthreg >= 8 || (et != TUINT64 && et != TINT64) {
+ goto ret
+ }
+ if et == TINT64 {
+ namebuf = "int64"
+ } else {
+ namebuf = "uint64"
+ }
+ if n.Op == ODIV {
+ namebuf += "div"
+ } else {
+ namebuf += "mod"
+ }
+ n = mkcall(namebuf, n.Type, init, conv(n.Left, Types[et]), conv(n.Right, Types[et]))
+
+ default:
+ break
+ }
+
+ goto ret
+
+ case OINDEX:
+ walkexpr(&n.Left, init)
+
+ // save the original node for bounds checking elision.
+ // If it was a ODIV/OMOD walk might rewrite it.
+ r = n.Right
+
+ walkexpr(&n.Right, init)
+
+ // if range of type cannot exceed static array bound,
+ // disable bounds check.
+ if n.Bounded != 0 {
+ goto ret
+ }
+ t = n.Left.Type
+ if t != nil && Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+ if Isfixedarray(t) != 0 {
+ n.Bounded = uint8(bounded(r, t.Bound))
+ if Debug['m'] != 0 && n.Bounded != 0 && !(Isconst(n.Right, CTINT) != 0) {
+ Warn("index bounds check elided")
+ }
+ if Smallintconst(n.Right) != 0 && !(n.Bounded != 0) {
+ Yyerror("index out of bounds")
+ }
+ } else if Isconst(n.Left, CTSTR) != 0 {
+ n.Bounded = uint8(bounded(r, int64(len(n.Left.Val.U.Sval.S))))
+ if Debug['m'] != 0 && n.Bounded != 0 && !(Isconst(n.Right, CTINT) != 0) {
+ Warn("index bounds check elided")
+ }
+ if Smallintconst(n.Right) != 0 {
+ if !(n.Bounded != 0) {
+ Yyerror("index out of bounds")
+ } else {
+ // replace "abc"[1] with 'b'.
+ // delayed until now because "abc"[1] is not
+ // an ideal constant.
+ v = Mpgetfix(n.Right.Val.U.Xval)
+
+ Nodconst(n, n.Type, int64(n.Left.Val.U.Sval.S[v]))
+ n.Typecheck = 1
+ }
+ }
+ }
+
+ if Isconst(n.Right, CTINT) != 0 {
+ if Mpcmpfixfix(n.Right.Val.U.Xval, &mpzero) < 0 || Mpcmpfixfix(n.Right.Val.U.Xval, Maxintval[TINT]) > 0 {
+ Yyerror("index out of bounds")
+ }
+ }
+ goto ret
+
+ case OINDEXMAP:
+ if n.Etype == 1 {
+ goto ret
+ }
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+
+ t = n.Left.Type
+ p = ""
+ if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
+ switch Simsimtype(t.Down) {
+ case TINT32,
+ TUINT32:
+ p = "mapaccess1_fast32"
+
+ case TINT64,
+ TUINT64:
+ p = "mapaccess1_fast64"
+
+ case TSTRING:
+ p = "mapaccess1_faststr"
+ }
+ }
+
+ if p != "" {
+ // fast versions take key by value
+ key = n.Right
+ } else {
+ // standard version takes key by reference.
+ // orderexpr made sure key is addressable.
+ key = Nod(OADDR, n.Right, nil)
+
+ p = "mapaccess1"
+ }
+
+ n = mkcall1(mapfn(p, t), Ptrto(t.Type), init, typename(t), n.Left, key)
+ n = Nod(OIND, n, nil)
+ n.Type = t.Type
+ n.Typecheck = 1
+
+ // mapaccess needs a zero value to be at least this big.
+ if zerosize < t.Type.Width {
+ zerosize = t.Type.Width
+ }
+ goto ret
+
+ case ORECV:
+ Fatal("walkexpr ORECV") // should see inside OAS only
+ fallthrough
+
+ case OSLICE:
+ if n.Right != nil && n.Right.Left == nil && n.Right.Right == nil { // noop
+ walkexpr(&n.Left, init)
+ n = n.Left
+ goto ret
+ }
+ fallthrough
+ case OSLICEARR,
+ OSLICESTR:
+ if n.Right == nil { // already processed
+ goto ret
+ }
+
+ walkexpr(&n.Left, init)
+
+ // cgen_slice can't handle string literals as source
+ // TODO the OINDEX case is a bug elsewhere that needs to be traced. it causes a crash on ([2][]int{ ... })[1][lo:hi]
+ if (n.Op == OSLICESTR && n.Left.Op == OLITERAL) || (n.Left.Op == OINDEX) {
+ n.Left = copyexpr(n.Left, n.Left.Type, init)
+ } else {
+ n.Left = safeexpr(n.Left, init)
+ }
+ walkexpr(&n.Right.Left, init)
+ n.Right.Left = safeexpr(n.Right.Left, init)
+ walkexpr(&n.Right.Right, init)
+ n.Right.Right = safeexpr(n.Right.Right, init)
+ n = sliceany(n, init) // chops n->right, sets n->list
+ goto ret
+
+ case OSLICE3,
+ OSLICE3ARR:
+ if n.Right == nil { // already processed
+ goto ret
+ }
+
+ walkexpr(&n.Left, init)
+
+ // TODO the OINDEX case is a bug elsewhere that needs to be traced. it causes a crash on ([2][]int{ ... })[1][lo:hi]
+ // TODO the comment on the previous line was copied from case OSLICE. it might not even be true.
+ if n.Left.Op == OINDEX {
+ n.Left = copyexpr(n.Left, n.Left.Type, init)
+ } else {
+ n.Left = safeexpr(n.Left, init)
+ }
+ walkexpr(&n.Right.Left, init)
+ n.Right.Left = safeexpr(n.Right.Left, init)
+ walkexpr(&n.Right.Right.Left, init)
+ n.Right.Right.Left = safeexpr(n.Right.Right.Left, init)
+ walkexpr(&n.Right.Right.Right, init)
+ n.Right.Right.Right = safeexpr(n.Right.Right.Right, init)
+ n = sliceany(n, init) // chops n->right, sets n->list
+ goto ret
+
+ case OADDR:
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case ONEW:
+ if n.Esc == EscNone && n.Type.Type.Width < 1<<16 {
+ r = temp(n.Type.Type)
+ r = Nod(OAS, r, nil) // zero temp
+ typecheck(&r, Etop)
+ *init = list(*init, r)
+ r = Nod(OADDR, r.Left, nil)
+ typecheck(&r, Erv)
+ n = r
+ } else {
+ n = callnew(n.Type.Type)
+ }
+
+ goto ret
+
+ // If one argument to the comparison is an empty string,
+ // comparing the lengths instead will yield the same result
+ // without the function call.
+ case OCMPSTR:
+ if (Isconst(n.Left, CTSTR) != 0 && len(n.Left.Val.U.Sval.S) == 0) || (Isconst(n.Right, CTSTR) != 0 && len(n.Right.Val.U.Sval.S) == 0) {
+ r = Nod(int(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ r.Type = n.Type
+ n = r
+ goto ret
+ }
+
+ // s + "badgerbadgerbadger" == "badgerbadgerbadger"
+ if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) != 0 && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) != 0 && cmpslit(n.Right, n.Left.List.Next.N) == 0 {
+ r = Nod(int(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ r.Type = n.Type
+ n = r
+ goto ret
+ }
+
+ if n.Etype == OEQ || n.Etype == ONE {
+ // prepare for rewrite below
+ n.Left = cheapexpr(n.Left, init)
+
+ n.Right = cheapexpr(n.Right, init)
+
+ r = mkcall("eqstring", Types[TBOOL], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
+
+ // quick check of len before full compare for == or !=
+ // eqstring assumes that the lengths are equal
+ if n.Etype == OEQ {
+ // len(left) == len(right) && eqstring(left, right)
+ r = Nod(OANDAND, Nod(OEQ, Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil)), r)
+ } else {
+ // len(left) != len(right) || !eqstring(left, right)
+ r = Nod(ONOT, r, nil)
+
+ r = Nod(OOROR, Nod(ONE, Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil)), r)
+ }
+
+ typecheck(&r, Erv)
+ walkexpr(&r, nil)
+ } else {
+ // sys_cmpstring(s1, s2) :: 0
+ r = mkcall("cmpstring", Types[TINT], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
+
+ r = Nod(int(n.Etype), r, Nodintconst(0))
+ }
+
+ typecheck(&r, Erv)
+ if n.Type.Etype != TBOOL {
+ Fatal("cmp %v", Tconv(n.Type, 0))
+ }
+ r.Type = n.Type
+ n = r
+ goto ret
+
+ case OADDSTR:
+ n = addstr(n, init)
+ goto ret
+
+ case OAPPEND:
+ if n.Isddd != 0 {
+ n = appendslice(n, init) // also works for append(slice, string).
+ } else {
+ n = walkappend(n, init)
+ }
+ goto ret
+
+ case OCOPY:
+ n = copyany(n, init, flag_race)
+ goto ret
+
+ // cannot use chanfn - closechan takes any, not chan any
+ case OCLOSE:
+ fn = syslook("closechan", 1)
+
+ argtype(fn, n.Left.Type)
+ n = mkcall1(fn, nil, init, n.Left)
+ goto ret
+
+ case OMAKECHAN:
+ n = mkcall1(chanfn("makechan", 1, n.Type), n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]))
+ goto ret
+
+ case OMAKEMAP:
+ t = n.Type
+
+ fn = syslook("makemap", 1)
+
+ a = nodnil() // hmap buffer
+ r = nodnil() // bucket buffer
+ if n.Esc == EscNone {
+ // Allocate hmap buffer on stack.
+ var_ = temp(hmap(t))
+
+ a = Nod(OAS, var_, nil) // zero temp
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+ a = Nod(OADDR, var_, nil)
+
+ // Allocate one bucket on stack.
+		// Maximum key/value size is 128 bytes; larger objects
+		// are stored with an indirection, so the maximum bucket size is 2048+eps.
+ var_ = temp(mapbucket(t))
+
+ r = Nod(OAS, var_, nil) // zero temp
+ typecheck(&r, Etop)
+ *init = list(*init, r)
+ r = Nod(OADDR, var_, nil)
+ }
+
+ argtype(fn, hmap(t)) // hmap buffer
+ argtype(fn, mapbucket(t)) // bucket buffer
+ argtype(fn, t.Down) // key type
+ argtype(fn, t.Type) // value type
+ n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]), a, r)
+ goto ret
+
+ case OMAKESLICE:
+ l = n.Left
+ r = n.Right
+ if r == nil {
+ r = safeexpr(l, init)
+ l = r
+ }
+ t = n.Type
+ if n.Esc == EscNone && Smallintconst(l) != 0 && Smallintconst(r) != 0 && (t.Type.Width == 0 || Mpgetfix(r.Val.U.Xval) < (1<<16)/t.Type.Width) {
+ // var arr [r]T
+ // n = arr[:l]
+ t = aindex(r, t.Type) // [r]T
+ var_ = temp(t)
+ a = Nod(OAS, var_, nil) // zero temp
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+ r = Nod(OSLICE, var_, Nod(OKEY, nil, l)) // arr[:l]
+		r = conv(r, n.Type) // in case n.Type is named.
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ n = r
+ } else {
+ // makeslice(t *Type, nel int64, max int64) (ary []any)
+ fn = syslook("makeslice", 1)
+
+ argtype(fn, t.Type) // any-1
+ n = mkcall1(fn, n.Type, init, typename(n.Type), conv(l, Types[TINT64]), conv(r, Types[TINT64]))
+ }
+
+ goto ret
+
+ case ORUNESTR:
+ a = nodnil()
+ if n.Esc == EscNone {
+ t = aindex(Nodintconst(4), Types[TUINT8])
+ var_ = temp(t)
+ a = Nod(OADDR, var_, nil)
+ }
+
+ // intstring(*[4]byte, rune)
+ n = mkcall("intstring", n.Type, init, a, conv(n.Left, Types[TINT64]))
+
+ goto ret
+
+ case OARRAYBYTESTR:
+ a = nodnil()
+ if n.Esc == EscNone {
+ // Create temporary buffer for string on stack.
+ t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ // slicebytetostring(*[32]byte, []byte) string;
+ n = mkcall("slicebytetostring", n.Type, init, a, n.Left)
+
+ goto ret
+
+ // slicebytetostringtmp([]byte) string;
+ case OARRAYBYTESTRTMP:
+ n = mkcall("slicebytetostringtmp", n.Type, init, n.Left)
+
+ goto ret
+
+ // slicerunetostring(*[32]byte, []rune) string;
+ case OARRAYRUNESTR:
+ a = nodnil()
+
+ if n.Esc == EscNone {
+ // Create temporary buffer for string on stack.
+ t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
+ goto ret
+
+	// stringtoslicebyte(*[32]byte, string) []byte;
+ case OSTRARRAYBYTE:
+ a = nodnil()
+
+ if n.Esc == EscNone {
+ // Create temporary buffer for slice on stack.
+ t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, Types[TSTRING]))
+ goto ret
+
+ // stringtoslicebytetmp(string) []byte;
+ case OSTRARRAYBYTETMP:
+ n = mkcall("stringtoslicebytetmp", n.Type, init, conv(n.Left, Types[TSTRING]))
+
+ goto ret
+
+ // stringtoslicerune(*[32]rune, string) []rune
+ case OSTRARRAYRUNE:
+ a = nodnil()
+
+ if n.Esc == EscNone {
+ // Create temporary buffer for slice on stack.
+ t = aindex(Nodintconst(tmpstringbufsize), Types[TINT32])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ n = mkcall("stringtoslicerune", n.Type, init, a, n.Left)
+ goto ret
+
+ // ifaceeq(i1 any-1, i2 any-2) (ret bool);
+ case OCMPIFACE:
+ if !Eqtype(n.Left.Type, n.Right.Type) {
+ Fatal("ifaceeq %v %v %v", Oconv(int(n.Op), 0), Tconv(n.Left.Type, 0), Tconv(n.Right.Type, 0))
+ }
+ if isnilinter(n.Left.Type) != 0 {
+ fn = syslook("efaceeq", 1)
+ } else {
+ fn = syslook("ifaceeq", 1)
+ }
+
+ n.Right = cheapexpr(n.Right, init)
+ n.Left = cheapexpr(n.Left, init)
+ argtype(fn, n.Right.Type)
+ argtype(fn, n.Left.Type)
+ r = mkcall1(fn, n.Type, init, n.Left, n.Right)
+ if n.Etype == ONE {
+ r = Nod(ONOT, r, nil)
+ }
+
+ // check itable/type before full compare.
+ if n.Etype == OEQ {
+ r = Nod(OANDAND, Nod(OEQ, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
+ } else {
+ r = Nod(OOROR, Nod(ONE, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
+ }
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ r.Type = n.Type
+ n = r
+ goto ret
+
+ case OARRAYLIT,
+ OMAPLIT,
+ OSTRUCTLIT,
+ OPTRLIT:
+ var_ = temp(n.Type)
+ anylit(0, n, var_, init)
+ n = var_
+ goto ret
+
+ case OSEND:
+ n1 = n.Right
+ n1 = assignconv(n1, n.Left.Type.Type, "chan send")
+ walkexpr(&n1, init)
+ n1 = Nod(OADDR, n1, nil)
+ n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, typename(n.Left.Type), n.Left, n1)
+ goto ret
+
+ case OCLOSURE:
+ n = walkclosure(n, init)
+ goto ret
+
+ case OCALLPART:
+ n = walkpartialcall(n, init)
+ goto ret
+ }
+
+ Fatal("missing switch %v", Oconv(int(n.Op), 0))
+
+ // Expressions that are constant at run time but not
+ // considered const by the language spec are not turned into
+ // constants until walk. For example, if n is y%1 == 0, the
+ // walk of y%1 may have replaced it by 0.
+ // Check whether n with its updated args is itself now a constant.
+ret:
+ t = n.Type
+
+ evconst(n)
+ n.Type = t
+ if n.Op == OLITERAL {
+ typecheck(&n, Erv)
+ }
+
+ ullmancalc(n)
+
+ if Debug['w'] != 0 && n != nil {
+ Dump("walk", n)
+ }
+
+ lineno = lno
+ *np = n
+}
+
+func ascompatee1(op int, l *Node, r *Node, init **NodeList) *Node {
+ var n *Node
+
+ // convas will turn map assigns into function calls,
+ // making it impossible for reorder3 to work.
+ n = Nod(OAS, l, r)
+
+ if l.Op == OINDEXMAP {
+ return n
+ }
+
+ return convas(n, init)
+}
+
+func ascompatee(op int, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
+ var ll *NodeList
+ var lr *NodeList
+ var nn *NodeList
+
+ /*
+ * check assign expression list to
+	 * an expression list. called in
+ * expr-list = expr-list
+ */
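+	// e.g. for a, b = x, y each pair becomes its own OAS node;
+	// reorder3 later deals with aliasing between the pairs.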
+
+ // ensure order of evaluation for function calls
+ for ll = nl; ll != nil; ll = ll.Next {
+ ll.N = safeexpr(ll.N, init)
+ }
+ for lr = nr; lr != nil; lr = lr.Next {
+ lr.N = safeexpr(lr.N, init)
+ }
+
+ nn = nil
+ ll = nl
+ lr = nr
+	for ; ll != nil && lr != nil; ll, lr = ll.Next, lr.Next {
+ // Do not generate 'x = x' during return. See issue 4014.
+ if op == ORETURN && ll.N == lr.N {
+ continue
+ }
+ nn = list(nn, ascompatee1(op, ll.N, lr.N, init))
+ }
+
+ // cannot happen: caller checked that lists had same length
+ if ll != nil || lr != nil {
+ Yyerror("error in shape across %v %v %v / %d %d [%s]", Hconv(nl, obj.FmtSign), Oconv(int(op), 0), Hconv(nr, obj.FmtSign), count(nl), count(nr), Curfn.Nname.Sym.Name)
+ }
+ return nn
+}
+
+/*
+ * l is an lv and rt is the type of an rv
+ * return 1 if this implies a function call
+ * evaluating the lv or a function call
+ * in the conversion of the types
+ */
+func fncall(l *Node, rt *Type) int {
+ var r Node
+
+ if l.Ullman >= UINF || l.Op == OINDEXMAP {
+ return 1
+ }
+ r = Node{}
+ if needwritebarrier(l, &r) != 0 {
+ return 1
+ }
+ if Eqtype(l.Type, rt) {
+ return 0
+ }
+ return 1
+}
+
+func ascompatet(op int, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeList {
+ var l *Node
+ var tmp *Node
+ var a *Node
+ var ll *NodeList
+ var r *Type
+ var saver Iter
+ var ucount int
+ var nn *NodeList
+ var mm *NodeList
+
+ /*
+ * check assign type list to
+	 * an expression list. called in
+ * expr-list = func()
+ */
+ r = Structfirst(&saver, nr)
+
+ nn = nil
+ mm = nil
+ ucount = 0
+ for ll = nl; ll != nil; ll = ll.Next {
+ if r == nil {
+ break
+ }
+ l = ll.N
+ if isblank(l) {
+ r = structnext(&saver)
+ continue
+ }
+
+ // any lv that causes a fn call must be
+ // deferred until all the return arguments
+ // have been pulled from the output arguments
+ if fncall(l, r.Type) != 0 {
+ tmp = temp(r.Type)
+ typecheck(&tmp, Erv)
+ a = Nod(OAS, l, tmp)
+ a = convas(a, init)
+ mm = list(mm, a)
+ l = tmp
+ }
+
+ a = Nod(OAS, l, nodarg(r, fp))
+ a = convas(a, init)
+ ullmancalc(a)
+ if a.Ullman >= UINF {
+ Dump("ascompatet ucount", a)
+ ucount++
+ }
+
+ nn = list(nn, a)
+ r = structnext(&saver)
+ }
+
+ if ll != nil || r != nil {
+ Yyerror("ascompatet: assignment count mismatch: %d = %d", count(nl), structcount(*nr))
+ }
+
+ if ucount != 0 {
+ Fatal("ascompatet: too many function calls evaluating parameters")
+ }
+ return concat(nn, mm)
+}
+
+/*
+ * package all the arguments that match a ... T parameter into a []T.
+ */
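+// e.g. for f(x int, y ...int) called as f(a, b, c), the arguments
+// b and c are gathered into a []int (an OCOMPLIT) and assigned to y.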
+func mkdotargslice(lr0 *NodeList, nn *NodeList, l *Type, fp int, init **NodeList, ddd *Node) *NodeList {
+ var a *Node
+ var n *Node
+ var tslice *Type
+ var esc int
+
+ esc = EscUnknown
+ if ddd != nil {
+ esc = int(ddd.Esc)
+ }
+
+ tslice = typ(TARRAY)
+ tslice.Type = l.Type.Type
+ tslice.Bound = -1
+
+ if count(lr0) == 0 {
+ n = nodnil()
+ n.Type = tslice
+ } else {
+ n = Nod(OCOMPLIT, nil, typenod(tslice))
+ if ddd != nil {
+ n.Alloc = ddd.Alloc // temporary to use
+ }
+ n.List = lr0
+ n.Esc = uint(esc)
+ typecheck(&n, Erv)
+ if n.Type == nil {
+ Fatal("mkdotargslice: typecheck failed")
+ }
+ walkexpr(&n, init)
+ }
+
+ a = Nod(OAS, nodarg(l, fp), n)
+ nn = list(nn, convas(a, init))
+ return nn
+}
+
+/*
+ * helpers for shape errors
+ */
+func dumptypes(nl **Type, what string) string {
+ var first int
+ var l *Type
+ var savel Iter
+ var fmt_ string
+
+ fmt_ = ""
+ fmt_ += fmt.Sprintf("\t")
+ first = 1
+ for l = Structfirst(&savel, nl); l != nil; l = structnext(&savel) {
+ if first != 0 {
+ first = 0
+ } else {
+ fmt_ += fmt.Sprintf(", ")
+ }
+ fmt_ += fmt.Sprintf("%v", Tconv(l, 0))
+ }
+
+ if first != 0 {
+ fmt_ += fmt.Sprintf("[no arguments %s]", what)
+ }
+ return fmt_
+}
+
+func dumpnodetypes(l *NodeList, what string) string {
+ var first int
+ var r *Node
+ var fmt_ string
+
+ fmt_ = ""
+ fmt_ += fmt.Sprintf("\t")
+ first = 1
+ for ; l != nil; l = l.Next {
+ r = l.N
+ if first != 0 {
+ first = 0
+ } else {
+ fmt_ += fmt.Sprintf(", ")
+ }
+ fmt_ += fmt.Sprintf("%v", Tconv(r.Type, 0))
+ }
+
+ if first != 0 {
+ fmt_ += fmt.Sprintf("[no arguments %s]", what)
+ }
+ return fmt_
+}
+
+/*
+ * check assign expression list to
+ * a type list. called in
+ * return expr-list
+ * func(expr-list)
+ */
+func ascompatte(op int, call *Node, isddd int, nl **Type, lr *NodeList, fp int, init **NodeList) *NodeList {
+ var l *Type
+ var ll *Type
+ var r *Node
+ var a *Node
+ var nn *NodeList
+ var lr0 *NodeList
+ var alist *NodeList
+ var savel Iter
+ var l1 string
+ var l2 string
+
+ lr0 = lr
+ l = Structfirst(&savel, nl)
+ r = nil
+ if lr != nil {
+ r = lr.N
+ }
+ nn = nil
+
+ // f(g()) where g has multiple return values
+ if r != nil && lr.Next == nil && r.Type.Etype == TSTRUCT && r.Type.Funarg != 0 {
+ // optimization - can do block copy
+ if eqtypenoname(r.Type, *nl) != 0 {
+ a = nodarg(*nl, fp)
+ r = Nod(OCONVNOP, r, nil)
+ r.Type = a.Type
+ nn = list1(convas(Nod(OAS, a, r), init))
+ goto ret
+ }
+
+ // conversions involved.
+ // copy into temporaries.
+ alist = nil
+
+ for l = Structfirst(&savel, &r.Type); l != nil; l = structnext(&savel) {
+ a = temp(l.Type)
+ alist = list(alist, a)
+ }
+
+ a = Nod(OAS2, nil, nil)
+ a.List = alist
+ a.Rlist = lr
+ typecheck(&a, Etop)
+ walkstmt(&a)
+ *init = list(*init, a)
+ lr = alist
+ r = lr.N
+ l = Structfirst(&savel, nl)
+ }
+
+loop:
+ if l != nil && l.Isddd != 0 {
+ // the ddd parameter must be last
+ ll = structnext(&savel)
+
+ if ll != nil {
+ Yyerror("... must be last argument")
+ }
+
+ // special case --
+	// only when a single ddd argument is assigned
+	// to a ddd parameter is it passed through
+	// unencapsulated
+ if r != nil && lr.Next == nil && isddd != 0 && Eqtype(l.Type, r.Type) {
+ a = Nod(OAS, nodarg(l, fp), r)
+ a = convas(a, init)
+ nn = list(nn, a)
+ goto ret
+ }
+
+ // normal case -- make a slice of all
+ // remaining arguments and pass it to
+ // the ddd parameter.
+ nn = mkdotargslice(lr, nn, l, fp, init, call.Right)
+
+ goto ret
+ }
+
+ if l == nil || r == nil {
+ if l != nil || r != nil {
+ l1 = dumptypes(nl, "expected")
+ l2 = dumpnodetypes(lr0, "given")
+ if l != nil {
+ Yyerror("not enough arguments to %v\n%s\n%s", Oconv(int(op), 0), l1, l2)
+ } else {
+ Yyerror("too many arguments to %v\n%s\n%s", Oconv(int(op), 0), l1, l2)
+ }
+ }
+
+ goto ret
+ }
+
+ a = Nod(OAS, nodarg(l, fp), r)
+ a = convas(a, init)
+ nn = list(nn, a)
+
+ l = structnext(&savel)
+ r = nil
+ lr = lr.Next
+ if lr != nil {
+ r = lr.N
+ }
+ goto loop
+
+ret:
+ for lr = nn; lr != nil; lr = lr.Next {
+ lr.N.Typecheck = 1
+ }
+ return nn
+}
+
+// generate code for print
+func walkprint(nn *Node, init **NodeList) *Node {
+ var r *Node
+ var n *Node
+ var l *NodeList
+ var all *NodeList
+ var on *Node
+ var t *Type
+ var notfirst int
+ var et int
+ var op int
+ var calls *NodeList
+
+ op = int(nn.Op)
+ all = nn.List
+ calls = nil
+ notfirst = 0
+
+ // Hoist all the argument evaluation up before the lock.
+ walkexprlistcheap(all, init)
+
+ calls = list(calls, mkcall("printlock", nil, init))
+
+ for l = all; l != nil; l = l.Next {
+ if notfirst != 0 {
+ calls = list(calls, mkcall("printsp", nil, init))
+ }
+
+ notfirst = bool2int(op == OPRINTN)
+
+ n = l.N
+ if n.Op == OLITERAL {
+ switch n.Val.Ctype {
+ case CTRUNE:
+ defaultlit(&n, runetype)
+
+ case CTINT:
+ defaultlit(&n, Types[TINT64])
+
+ case CTFLT:
+ defaultlit(&n, Types[TFLOAT64])
+ }
+ }
+
+ if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
+ defaultlit(&n, Types[TINT64])
+ }
+ defaultlit(&n, nil)
+ l.N = n
+ if n.Type == nil || n.Type.Etype == TFORW {
+ continue
+ }
+
+ t = n.Type
+ et = int(n.Type.Etype)
+ if Isinter(n.Type) != 0 {
+ if isnilinter(n.Type) != 0 {
+ on = syslook("printeface", 1)
+ } else {
+ on = syslook("printiface", 1)
+ }
+ argtype(on, n.Type) // any-1
+ } else if Isptr[et] != 0 || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR {
+ on = syslook("printpointer", 1)
+ argtype(on, n.Type) // any-1
+ } else if Isslice(n.Type) != 0 {
+ on = syslook("printslice", 1)
+ argtype(on, n.Type) // any-1
+ } else if Isint[et] != 0 {
+ if et == TUINT64 {
+ if (t.Sym.Pkg == Runtimepkg || compiling_runtime != 0) && t.Sym.Name == "hex" {
+ on = syslook("printhex", 0)
+ } else {
+ on = syslook("printuint", 0)
+ }
+ } else {
+ on = syslook("printint", 0)
+ }
+ } else if Isfloat[et] != 0 {
+ on = syslook("printfloat", 0)
+ } else if Iscomplex[et] != 0 {
+ on = syslook("printcomplex", 0)
+ } else if et == TBOOL {
+ on = syslook("printbool", 0)
+ } else if et == TSTRING {
+ on = syslook("printstring", 0)
+ } else {
+ badtype(OPRINT, n.Type, nil)
+ continue
+ }
+
+ t = *getinarg(on.Type)
+ if t != nil {
+ t = t.Type
+ }
+ if t != nil {
+ t = t.Type
+ }
+
+ if !Eqtype(t, n.Type) {
+ n = Nod(OCONV, n, nil)
+ n.Type = t
+ }
+
+ r = Nod(OCALL, on, nil)
+ r.List = list1(n)
+ calls = list(calls, r)
+ }
+
+ if op == OPRINTN {
+ calls = list(calls, mkcall("printnl", nil, nil))
+ }
+
+ calls = list(calls, mkcall("printunlock", nil, init))
+
+ typechecklist(calls, Etop)
+ walkexprlist(calls, init)
+
+ r = Nod(OEMPTY, nil, nil)
+ typecheck(&r, Etop)
+ walkexpr(&r, init)
+ r.Ninit = calls
+ return r
+}
+
+func callnew(t *Type) *Node {
+ var fn *Node
+
+ dowidth(t)
+ fn = syslook("newobject", 1)
+ argtype(fn, t)
+ return mkcall1(fn, Ptrto(t), nil, typename(t))
+}
+
+func isstack(n *Node) int {
+ var defn *Node
+
+ n = outervalue(n)
+
+ // If n is *autotmp and autotmp = &foo, replace n with foo.
+ // We introduce such temps when initializing struct literals.
+ if n.Op == OIND && n.Left.Op == ONAME && strings.HasPrefix(n.Left.Sym.Name, "autotmp_") {
+ defn = n.Left.Defn
+ if defn != nil && defn.Op == OAS && defn.Right.Op == OADDR {
+ n = defn.Right.Left
+ }
+ }
+
+ switch n.Op {
+ // OINDREG only ends up in walk if it's indirect of SP.
+ case OINDREG:
+ return 1
+
+ case ONAME:
+ switch n.Class {
+ case PAUTO,
+ PPARAM,
+ PPARAMOUT:
+ return 1
+ }
+ }
+
+ return 0
+}
+
+func isglobal(n *Node) int {
+ n = outervalue(n)
+
+ switch n.Op {
+ case ONAME:
+ switch n.Class {
+ case PEXTERN:
+ return 1
+ }
+ }
+
+ return 0
+}
+
+// Do we need a write barrier for the assignment l = r?
+func needwritebarrier(l *Node, r *Node) int {
+ if !(use_writebarrier != 0) {
+ return 0
+ }
+
+ if l == nil || isblank(l) {
+ return 0
+ }
+
+ // No write barrier for write of non-pointers.
+ dowidth(l.Type)
+
+ if !haspointers(l.Type) {
+ return 0
+ }
+
+ // No write barrier for write to stack.
+ if isstack(l) != 0 {
+ return 0
+ }
+
+ // No write barrier for implicit or explicit zeroing.
+ if r == nil || iszero(r) != 0 {
+ return 0
+ }
+
+ // No write barrier for initialization to constant.
+ if r.Op == OLITERAL {
+ return 0
+ }
+
+ // No write barrier for storing static (read-only) data.
+ if r.Op == ONAME && strings.HasPrefix(r.Sym.Name, "statictmp_") {
+ return 0
+ }
+
+ // No write barrier for storing address of stack values,
+ // which are guaranteed only to be written to the stack.
+ if r.Op == OADDR && isstack(r.Left) != 0 {
+ return 0
+ }
+
+ // No write barrier for storing address of global, which
+ // is live no matter what.
+ if r.Op == OADDR && isglobal(r.Left) != 0 {
+ return 0
+ }
+
+ // No write barrier for reslice: x = x[0:y] or x = append(x, ...).
+ // Both are compiled to modify x directly.
+ // In the case of append, a write barrier may still be needed
+ // if the underlying array grows, but the append code can
+ // generate the write barrier directly in that case.
+ // (It does not yet, but the cost of the write barrier will be
+ // small compared to the cost of the allocation.)
+ if r.Reslice != 0 {
+ switch r.Op {
+ case OSLICE,
+ OSLICE3,
+ OSLICESTR,
+ OAPPEND:
+ break
+
+ default:
+ Dump("bad reslice-l", l)
+ Dump("bad reslice-r", r)
+ }
+
+ return 0
+ }
+
+ // Otherwise, be conservative and use write barrier.
+ return 1
+}
+
+// TODO(rsc): Perhaps componentgen should run before this.
+
+var applywritebarrier_bv *Bvec
+
+func applywritebarrier(n *Node, init **NodeList) *Node {
+ var l *Node
+ var r *Node
+ var t *Type
+ var x int64
+ var name string
+
+ if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) != 0 {
+ if Curfn != nil && Curfn.Nowritebarrier {
+ Yyerror("write barrier prohibited")
+ }
+ t = n.Left.Type
+ l = Nod(OADDR, n.Left, nil)
+ l.Etype = 1 // addr does not escape
+ if t.Width == int64(Widthptr) {
+ n = mkcall1(writebarrierfn("writebarrierptr", t, n.Right.Type), nil, init, l, n.Right)
+ } else if t.Etype == TSTRING {
+ n = mkcall1(writebarrierfn("writebarrierstring", t, n.Right.Type), nil, init, l, n.Right)
+ } else if Isslice(t) != 0 {
+ n = mkcall1(writebarrierfn("writebarrierslice", t, n.Right.Type), nil, init, l, n.Right)
+ } else if Isinter(t) != 0 {
+ n = mkcall1(writebarrierfn("writebarrieriface", t, n.Right.Type), nil, init, l, n.Right)
+ } else if t.Width <= int64(4*Widthptr) {
+ x = 0
+ if applywritebarrier_bv == nil {
+ applywritebarrier_bv = bvalloc(obj.BitsPerPointer * 4)
+ }
+ bvresetall(applywritebarrier_bv)
+ twobitwalktype1(t, &x, applywritebarrier_bv)
+ const (
+ PtrBit = 1
+ )
+ // The bvgets are looking for BitsPointer in successive slots.
+ if obj.BitsPointer != 1<<PtrBit {
+ Fatal("wrong PtrBit")
+ }
+ switch t.Width / int64(Widthptr) {
+ default:
+ Fatal("found writebarrierfat for %d-byte object of type %v", int(t.Width), Tconv(t, 0))
+ fallthrough
+
+ case 2:
+ name = fmt.Sprintf("writebarrierfat%d%d", bvget(applywritebarrier_bv, PtrBit), bvget(applywritebarrier_bv, obj.BitsPerPointer+PtrBit))
+
+ case 3:
+ name = fmt.Sprintf("writebarrierfat%d%d%d", bvget(applywritebarrier_bv, PtrBit), bvget(applywritebarrier_bv, obj.BitsPerPointer+PtrBit), bvget(applywritebarrier_bv, 2*obj.BitsPerPointer+PtrBit))
+
+ case 4:
+ name = fmt.Sprintf("writebarrierfat%d%d%d%d", bvget(applywritebarrier_bv, PtrBit), bvget(applywritebarrier_bv, obj.BitsPerPointer+PtrBit), bvget(applywritebarrier_bv, 2*obj.BitsPerPointer+PtrBit), bvget(applywritebarrier_bv, 3*obj.BitsPerPointer+PtrBit))
+ }
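+		// The digits in the helper name give the pointer bitmap of the
+		// object, e.g. writebarrierfat0101 for a four-word object whose
+		// second and fourth words are pointers.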
+
+ n = mkcall1(writebarrierfn(name, t, n.Right.Type), nil, init, l, nodnil(), n.Right)
+ } else {
+ r = n.Right
+ for r.Op == OCONVNOP {
+ r = r.Left
+ }
+ r = Nod(OADDR, r, nil)
+ r.Etype = 1 // addr does not escape
+
+ //warnl(n->lineno, "typedmemmove %T %N", t, r);
+ n = mkcall1(writebarrierfn("typedmemmove", t, r.Left.Type), nil, init, typename(t), l, r)
+ }
+ }
+
+ return n
+}
+
+func convas(n *Node, init **NodeList) *Node {
+ var lt *Type
+ var rt *Type
+ var map_ *Node
+ var key *Node
+ var val *Node
+
+ if n.Op != OAS {
+ Fatal("convas: not OAS %v", Oconv(int(n.Op), 0))
+ }
+
+ n.Typecheck = 1
+
+ if n.Left == nil || n.Right == nil {
+ goto out
+ }
+
+ lt = n.Left.Type
+ rt = n.Right.Type
+ if lt == nil || rt == nil {
+ goto out
+ }
+
+ if isblank(n.Left) {
+ defaultlit(&n.Right, nil)
+ goto out
+ }
+
+ if n.Left.Op == OINDEXMAP {
+ map_ = n.Left.Left
+ key = n.Left.Right
+ val = n.Right
+ walkexpr(&map_, init)
+ walkexpr(&key, init)
+ walkexpr(&val, init)
+
+ // orderexpr made sure key and val are addressable.
+ key = Nod(OADDR, key, nil)
+
+ val = Nod(OADDR, val, nil)
+ n = mkcall1(mapfn("mapassign1", map_.Type), nil, init, typename(map_.Type), map_, key, val)
+ goto out
+ }
+
+ if !Eqtype(lt, rt) {
+ n.Right = assignconv(n.Right, lt, "assignment")
+ walkexpr(&n.Right, init)
+ }
+
+out:
+ ullmancalc(n)
+ return n
+}
+
+/*
+ * from ascompat[te]
+ * evaluating actual function arguments.
+ * f(a,b)
+ * if there is exactly one function expr,
+ * then it is done first. otherwise must
+ * make temp variables
+ */
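+// e.g. in f(g(), h(), i()) the results of g() and h() are saved in
+// temporaries first and only the final call i() is evaluated in place.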
+func reorder1(all *NodeList) *NodeList {
+ var f *Node
+ var a *Node
+ var n *Node
+ var l *NodeList
+ var r *NodeList
+ var g *NodeList
+ var c int
+ var d int
+ var t int
+
+ c = 0 // function calls
+ t = 0 // total parameters
+
+ for l = all; l != nil; l = l.Next {
+ n = l.N
+ t++
+ ullmancalc(n)
+ if n.Ullman >= UINF {
+ c++
+ }
+ }
+
+ if c == 0 || t == 1 {
+ return all
+ }
+
+ g = nil // fncalls assigned to tempnames
+ f = nil // last fncall assigned to stack
+ r = nil // non fncalls and tempnames assigned to stack
+ d = 0
+ for l = all; l != nil; l = l.Next {
+ n = l.N
+ if n.Ullman < UINF {
+ r = list(r, n)
+ continue
+ }
+
+ d++
+ if d == c {
+ f = n
+ continue
+ }
+
+ // make assignment of fncall to tempname
+ a = temp(n.Right.Type)
+
+ a = Nod(OAS, a, n.Right)
+ g = list(g, a)
+
+ // put normal arg assignment on list
+ // with fncall replaced by tempname
+ n.Right = a.Left
+
+ r = list(r, n)
+ }
+
+ if f != nil {
+ g = list(g, f)
+ }
+ return concat(g, r)
+}
+
+/*
+ * from ascompat[ee]
+ * a,b = c,d
+ * simultaneous assignment. there cannot
+ * be later use of an earlier lvalue.
+ *
+ * function calls have been removed.
+ */
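+// e.g. in i, a[i] = 1, 2 the index i in a[i] is copied to a
+// temporary before i itself is assigned.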
+func reorder3(all *NodeList) *NodeList {
+ var list *NodeList
+ var early *NodeList
+ var mapinit *NodeList
+ var l *Node
+
+ // If a needed expression may be affected by an
+ // earlier assignment, make an early copy of that
+ // expression and use the copy instead.
+ early = nil
+
+ mapinit = nil
+ for list = all; list != nil; list = list.Next {
+ l = list.N.Left
+
+ // Save subexpressions needed on left side.
+ // Drill through non-dereferences.
+ for {
+ if l.Op == ODOT || l.Op == OPAREN {
+ l = l.Left
+ continue
+ }
+
+ if l.Op == OINDEX && Isfixedarray(l.Left.Type) != 0 {
+ reorder3save(&l.Right, all, list, &early)
+ l = l.Left
+ continue
+ }
+
+ break
+ }
+
+ switch l.Op {
+ default:
+ Fatal("reorder3 unexpected lvalue %v", Oconv(int(l.Op), obj.FmtSharp))
+ fallthrough
+
+ case ONAME:
+ break
+
+ case OINDEX,
+ OINDEXMAP:
+ reorder3save(&l.Left, all, list, &early)
+ reorder3save(&l.Right, all, list, &early)
+ if l.Op == OINDEXMAP {
+ list.N = convas(list.N, &mapinit)
+ }
+
+ case OIND,
+ ODOTPTR:
+ reorder3save(&l.Left, all, list, &early)
+ }
+
+ // Save expression on right side.
+ reorder3save(&list.N.Right, all, list, &early)
+ }
+
+ early = concat(mapinit, early)
+ return concat(early, all)
+}
+
+/*
+ * if the evaluation of *np would be affected by the
+ * assignments in all up to but not including stop,
+ * copy into a temporary during *early and
+ * replace *np with that temp.
+ */
+func reorder3save(np **Node, all *NodeList, stop *NodeList, early **NodeList) {
+ var n *Node
+ var q *Node
+
+ n = *np
+ if !(aliased(n, all, stop) != 0) {
+ return
+ }
+
+ q = temp(n.Type)
+ q = Nod(OAS, q, n)
+ typecheck(&q, Etop)
+ *early = list(*early, q)
+ *np = q.Left
+}
+
+/*
+ * what's the outer value that a write to n affects?
+ * outer value means containing struct or array.
+ */
+func outervalue(n *Node) *Node {
+ for {
+ if n.Op == OXDOT {
+ Fatal("OXDOT in walk")
+ }
+ if n.Op == ODOT || n.Op == OPAREN || n.Op == OCONVNOP {
+ n = n.Left
+ continue
+ }
+
+ if n.Op == OINDEX && Isfixedarray(n.Left.Type) != 0 {
+ n = n.Left
+ continue
+ }
+
+ break
+ }
+
+ return n
+}
+
+/*
+ * Is it possible that the computation of n might be
+ * affected by writes in as up to but not including stop?
+ */
+func aliased(n *Node, all *NodeList, stop *NodeList) int {
+ var memwrite int
+ var varwrite int
+ var a *Node
+ var l *NodeList
+
+ if n == nil {
+ return 0
+ }
+
+ // Look for obvious aliasing: a variable being assigned
+ // during the all list and appearing in n.
+ // Also record whether there are any writes to main memory.
+ // Also record whether there are any writes to variables
+ // whose addresses have been taken.
+ memwrite = 0
+
+ varwrite = 0
+ for l = all; l != stop; l = l.Next {
+ a = outervalue(l.N.Left)
+ if a.Op != ONAME {
+ memwrite = 1
+ continue
+ }
+
+ switch n.Class {
+ default:
+ varwrite = 1
+ continue
+
+ case PAUTO,
+ PPARAM,
+ PPARAMOUT:
+ if n.Addrtaken != 0 {
+ varwrite = 1
+ continue
+ }
+
+ if vmatch2(a, n) != 0 {
+ // Direct hit.
+ return 1
+ }
+ }
+ }
+
+ // The variables being written do not appear in n.
+ // However, n might refer to computed addresses
+ // that are being written.
+
+ // If no computed addresses are affected by the writes, no aliasing.
+ if !(memwrite != 0) && !(varwrite != 0) {
+ return 0
+ }
+
+ // If n does not refer to computed addresses
+ // (that is, if n only refers to variables whose addresses
+ // have not been taken), no aliasing.
+ if varexpr(n) != 0 {
+ return 0
+ }
+
+ // Otherwise, both the writes and n refer to computed memory addresses.
+ // Assume that they might conflict.
+ return 1
+}
+
+/*
+ * does the evaluation of n only refer to variables
+ * whose addresses have not been taken?
+ * (and no other memory)
+ */
+func varexpr(n *Node) int {
+ if n == nil {
+ return 1
+ }
+
+ switch n.Op {
+ case OLITERAL:
+ return 1
+
+ case ONAME:
+ switch n.Class {
+ case PAUTO,
+ PPARAM,
+ PPARAMOUT:
+ if !(n.Addrtaken != 0) {
+ return 1
+ }
+ }
+
+ return 0
+
+ case OADD,
+ OSUB,
+ OOR,
+ OXOR,
+ OMUL,
+ ODIV,
+ OMOD,
+ OLSH,
+ ORSH,
+ OAND,
+ OANDNOT,
+ OPLUS,
+ OMINUS,
+ OCOM,
+ OPAREN,
+ OANDAND,
+ OOROR,
+ ODOT, // but not ODOTPTR
+ OCONV,
+ OCONVNOP,
+ OCONVIFACE,
+ ODOTTYPE:
+ return bool2int(varexpr(n.Left) != 0 && varexpr(n.Right) != 0)
+ }
+
+ // Be conservative.
+ return 0
+}
+
+/*
+ * is the name l mentioned in r?
+ */
+func vmatch2(l *Node, r *Node) int {
+ var ll *NodeList
+
+ if r == nil {
+ return 0
+ }
+ switch r.Op {
+ // match each right given left
+ case ONAME:
+ return bool2int(l == r)
+
+ case OLITERAL:
+ return 0
+ }
+
+ if vmatch2(l, r.Left) != 0 {
+ return 1
+ }
+ if vmatch2(l, r.Right) != 0 {
+ return 1
+ }
+ for ll = r.List; ll != nil; ll = ll.Next {
+ if vmatch2(l, ll.N) != 0 {
+ return 1
+ }
+ }
+ return 0
+}
+
+/*
+ * is any name mentioned in l also mentioned in r?
+ * called by sinit.c
+ */
+func vmatch1(l *Node, r *Node) int {
+ var ll *NodeList
+
+ /*
+ * isolate all left sides
+ */
+ if l == nil || r == nil {
+ return 0
+ }
+ switch l.Op {
+ case ONAME:
+ switch l.Class {
+ case PPARAM,
+ PPARAMREF,
+ PAUTO:
+ break
+
+ // assignment to non-stack variable
+ // must be delayed if right has function calls.
+ default:
+ if r.Ullman >= UINF {
+ return 1
+ }
+ }
+
+ return vmatch2(l, r)
+
+ case OLITERAL:
+ return 0
+ }
+
+ if vmatch1(l.Left, r) != 0 {
+ return 1
+ }
+ if vmatch1(l.Right, r) != 0 {
+ return 1
+ }
+ for ll = l.List; ll != nil; ll = ll.Next {
+ if vmatch1(ll.N, r) != 0 {
+ return 1
+ }
+ }
+ return 0
+}
+
+/*
+ * walk through argin parameters.
+ * generate and return code to allocate
+ * copies of escaped parameters to the heap.
+ */
+func paramstoheap(argin **Type, out int) *NodeList {
+ var t *Type
+ var savet Iter
+ var v *Node
+ var as *Node
+ var nn *NodeList
+
+ nn = nil
+ for t = Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
+ v = t.Nname
+ if v != nil && v.Sym != nil && v.Sym.Name[0] == '~' && v.Sym.Name[1] == 'r' { // unnamed result
+ v = nil
+ }
+
+ // For precise stacks, the garbage collector assumes results
+ // are always live, so zero them always.
+ if out != 0 {
+ // Defer might stop a panic and show the
+ // return values as they exist at the time of panic.
+ // Make sure to zero them on entry to the function.
+ nn = list(nn, Nod(OAS, nodarg(t, 1), nil))
+ }
+
+ if v == nil || !(v.Class&PHEAP != 0) {
+ continue
+ }
+
+ // generate allocation & copying code
+ if compiling_runtime != 0 {
+ Yyerror("%v escapes to heap, not allowed in runtime.", Nconv(v, 0))
+ }
+ if v.Alloc == nil {
+ v.Alloc = callnew(v.Type)
+ }
+ nn = list(nn, Nod(OAS, v.Heapaddr, v.Alloc))
+ if v.Class&^PHEAP != PPARAMOUT {
+ as = Nod(OAS, v, v.Stackparam)
+ v.Stackparam.Typecheck = 1
+ typecheck(&as, Etop)
+ as = applywritebarrier(as, &nn)
+ nn = list(nn, as)
+ }
+ }
+
+ return nn
+}
+
+/*
+ * walk through argout parameters copying back to stack
+ */
+func returnsfromheap(argin **Type) *NodeList {
+ var t *Type
+ var savet Iter
+ var v *Node
+ var nn *NodeList
+
+ nn = nil
+ for t = Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
+ v = t.Nname
+ if v == nil || v.Class != PHEAP|PPARAMOUT {
+ continue
+ }
+ nn = list(nn, Nod(OAS, v.Stackparam, v))
+ }
+
+ return nn
+}
+
+/*
+ * take care of migrating any function in/out args
+ * between the stack and the heap. adds code to
+ * curfn's before and after lists.
+ */
+func heapmoves() {
+ var nn *NodeList
+ var lno int32
+
+ lno = lineno
+ lineno = Curfn.Lineno
+ nn = paramstoheap(getthis(Curfn.Type), 0)
+ nn = concat(nn, paramstoheap(getinarg(Curfn.Type), 0))
+ nn = concat(nn, paramstoheap(Getoutarg(Curfn.Type), 1))
+ Curfn.Enter = concat(Curfn.Enter, nn)
+ lineno = Curfn.Endlineno
+ Curfn.Exit = returnsfromheap(Getoutarg(Curfn.Type))
+ lineno = lno
+}
+
+func vmkcall(fn *Node, t *Type, init **NodeList, va []*Node) *Node {
+ var i int
+ var n int
+ var r *Node
+ var args *NodeList
+
+ if fn.Type == nil || fn.Type.Etype != TFUNC {
+ Fatal("mkcall %v %v", Nconv(fn, 0), Tconv(fn.Type, 0))
+ }
+
+ args = nil
+ n = fn.Type.Intuple
+ for i = 0; i < n; i++ {
+ args = list(args, va[i])
+ }
+
+ r = Nod(OCALL, fn, nil)
+ r.List = args
+ if fn.Type.Outtuple > 0 {
+ typecheck(&r, Erv|Efnstruct)
+ } else {
+ typecheck(&r, Etop)
+ }
+ walkexpr(&r, init)
+ r.Type = t
+ return r
+}
+
+func mkcall(name string, t *Type, init **NodeList, args ...*Node) *Node {
+ return vmkcall(syslook(name, 0), t, init, args)
+}
+
+func mkcall1(fn *Node, t *Type, init **NodeList, args ...*Node) *Node {
+ return vmkcall(fn, t, init, args)
+}
+
+func conv(n *Node, t *Type) *Node {
+ if Eqtype(n.Type, t) {
+ return n
+ }
+ n = Nod(OCONV, n, nil)
+ n.Type = t
+ typecheck(&n, Erv)
+ return n
+}
+
+func chanfn(name string, n int, t *Type) *Node {
+ var fn *Node
+ var i int
+
+ if t.Etype != TCHAN {
+ Fatal("chanfn %v", Tconv(t, 0))
+ }
+ fn = syslook(name, 1)
+ for i = 0; i < n; i++ {
+ argtype(fn, t.Type)
+ }
+ return fn
+}
+
+func mapfn(name string, t *Type) *Node {
+ var fn *Node
+
+ if t.Etype != TMAP {
+ Fatal("mapfn %v", Tconv(t, 0))
+ }
+ fn = syslook(name, 1)
+ argtype(fn, t.Down)
+ argtype(fn, t.Type)
+ argtype(fn, t.Down)
+ argtype(fn, t.Type)
+ return fn
+}
+
+func mapfndel(name string, t *Type) *Node {
+ var fn *Node
+
+ if t.Etype != TMAP {
+ Fatal("mapfn %v", Tconv(t, 0))
+ }
+ fn = syslook(name, 1)
+ argtype(fn, t.Down)
+ argtype(fn, t.Type)
+ argtype(fn, t.Down)
+ return fn
+}
+
+func writebarrierfn(name string, l *Type, r *Type) *Node {
+ var fn *Node
+
+ fn = syslook(name, 1)
+ argtype(fn, l)
+ argtype(fn, r)
+ return fn
+}
+
+func addstr(n *Node, init **NodeList) *Node {
+ var r *Node
+ var cat *Node
+ var slice *Node
+ var buf *Node
+ var args *NodeList
+ var l *NodeList
+ var c int
+ var sz int64
+ var t *Type
+
+ // orderexpr rewrote OADDSTR to have a list of strings.
+ c = count(n.List)
+
+ if c < 2 {
+ Yyerror("addstr count %d too small", c)
+ }
+
+ buf = nodnil()
+ if n.Esc == EscNone {
+ sz = 0
+ for l = n.List; l != nil; l = l.Next {
+			if l.N.Op == OLITERAL {
+				sz += int64(len(l.N.Val.U.Sval.S))
+ }
+ }
+
+ // Don't allocate the buffer if the result won't fit.
+ if sz < tmpstringbufsize {
+ // Create temporary buffer for result string on stack.
+ t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+ buf = Nod(OADDR, temp(t), nil)
+ }
+ }
+
+ // build list of string arguments
+ args = list1(buf)
+
+ for l = n.List; l != nil; l = l.Next {
+ args = list(args, conv(l.N, Types[TSTRING]))
+ }
+
+ if c <= 5 {
+ // small numbers of strings use direct runtime helpers.
+ // note: orderexpr knows this cutoff too.
+ namebuf = fmt.Sprintf("concatstring%d", c)
+ } else {
+ // large numbers of strings are passed to the runtime as a slice.
+ namebuf = "concatstrings"
+
+ t = typ(TARRAY)
+ t.Type = Types[TSTRING]
+ t.Bound = -1
+ slice = Nod(OCOMPLIT, nil, typenod(t))
+ slice.Alloc = n.Alloc
+ slice.List = args.Next // skip buf arg
+ args = list1(buf)
+ args = list(args, slice)
+ slice.Esc = EscNone
+ }
+
+ cat = syslook(namebuf, 1)
+ r = Nod(OCALL, cat, nil)
+ r.List = args
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ r.Type = n.Type
+
+ return r
+}
+
+// expand append(l1, l2...) to
+// init {
+// s := l1
+// if n := len(l1) + len(l2) - cap(s); n > 0 {
+// s = growslice(s, n)
+// }
+// s = s[:len(l1)+len(l2)]
+// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+// }
+// s
+//
+// l2 is allowed to be a string.
+func appendslice(n *Node, init **NodeList) *Node {
+ var l *NodeList
+ var l1 *Node
+ var l2 *Node
+ var nt *Node
+ var nif *Node
+ var fn *Node
+ var nptr1 *Node
+ var nptr2 *Node
+ var nwid *Node
+ var s *Node
+
+ walkexprlistsafe(n.List, init)
+
+ // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+ // and n are name or literal, but those may index the slice we're
+ // modifying here. Fix explicitly.
+ for l = n.List; l != nil; l = l.Next {
+ l.N = cheapexpr(l.N, init)
+ }
+
+ l1 = n.List.N
+ l2 = n.List.Next.N
+
+ s = temp(l1.Type) // var s []T
+ l = nil
+ l = list(l, Nod(OAS, s, l1)) // s = l1
+
+ nt = temp(Types[TINT])
+
+ nif = Nod(OIF, nil, nil)
+
+ // n := len(s) + len(l2) - cap(s)
+ nif.Ninit = list1(Nod(OAS, nt, Nod(OSUB, Nod(OADD, Nod(OLEN, s, nil), Nod(OLEN, l2, nil)), Nod(OCAP, s, nil))))
+
+ nif.Ntest = Nod(OGT, nt, Nodintconst(0))
+
+ // instantiate growslice(Type*, []any, int64) []any
+ fn = syslook("growslice", 1)
+
+ argtype(fn, s.Type.Type)
+ argtype(fn, s.Type.Type)
+
+ // s = growslice(T, s, n)
+ nif.Nbody = list1(Nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type), s, conv(nt, Types[TINT64]))))
+
+ l = list(l, nif)
+
+ if haspointers(l1.Type.Type) {
+ // copy(s[len(l1):len(l1)+len(l2)], l2)
+ nptr1 = Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
+
+ nptr1.Etype = 1
+ nptr2 = l2
+ fn = syslook("typedslicecopy", 1)
+ argtype(fn, l1.Type)
+ argtype(fn, l2.Type)
+ nt = mkcall1(fn, Types[TINT], &l, typename(l1.Type.Type), nptr1, nptr2)
+ l = list(l, nt)
+ } else if flag_race != 0 {
+ // rely on runtime to instrument copy.
+ // copy(s[len(l1):len(l1)+len(l2)], l2)
+ nptr1 = Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
+
+ nptr1.Etype = 1
+ nptr2 = l2
+ if l2.Type.Etype == TSTRING {
+ fn = syslook("slicestringcopy", 1)
+ } else {
+ fn = syslook("slicecopy", 1)
+ }
+ argtype(fn, l1.Type)
+ argtype(fn, l2.Type)
+ nt = mkcall1(fn, Types[TINT], &l, nptr1, nptr2, Nodintconst(s.Type.Type.Width))
+ l = list(l, nt)
+ } else {
+ // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+ nptr1 = Nod(OINDEX, s, Nod(OLEN, l1, nil))
+
+ nptr1.Bounded = 1
+ nptr1 = Nod(OADDR, nptr1, nil)
+
+ nptr2 = Nod(OSPTR, l2, nil)
+
+ fn = syslook("memmove", 1)
+ argtype(fn, s.Type.Type) // 1 old []any
+ argtype(fn, s.Type.Type) // 2 ret []any
+
+ nwid = cheapexpr(conv(Nod(OLEN, l2, nil), Types[TUINTPTR]), &l)
+
+ nwid = Nod(OMUL, nwid, Nodintconst(s.Type.Type.Width))
+ nt = mkcall1(fn, nil, &l, nptr1, nptr2, nwid)
+ l = list(l, nt)
+ }
+
+ // s = s[:len(l1)+len(l2)]
+ nt = Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))
+
+ nt = Nod(OSLICE, s, Nod(OKEY, nil, nt))
+ nt.Etype = 1
+ l = list(l, Nod(OAS, s, nt))
+
+ typechecklist(l, Etop)
+ walkstmtlist(l)
+ *init = concat(*init, l)
+ return s
+}
+
+// expand append(src, a [, b]* ) to
+//
+// init {
+// s := src
+// const argc = len(args) - 1
+// if cap(s) - len(s) < argc {
+// s = growslice(s, argc)
+// }
+// n := len(s)
+// s = s[:n+argc]
+// s[n] = a
+// s[n+1] = b
+// ...
+// }
+// s
+func walkappend(n *Node, init **NodeList) *Node {
+ var l *NodeList
+ var a *NodeList
+ var nsrc *Node
+ var ns *Node
+ var nn *Node
+ var na *Node
+ var nx *Node
+ var fn *Node
+ var argc int
+
+ walkexprlistsafe(n.List, init)
+
+ // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+ // and n are name or literal, but those may index the slice we're
+ // modifying here. Fix explicitly.
+ for l = n.List; l != nil; l = l.Next {
+ l.N = cheapexpr(l.N, init)
+ }
+
+ nsrc = n.List.N
+
+ // Resolve slice type of multi-valued return.
+ if Istype(nsrc.Type, TSTRUCT) != 0 {
+ nsrc.Type = nsrc.Type.Type.Type
+ }
+ argc = count(n.List) - 1
+ if argc < 1 {
+ return nsrc
+ }
+
+ l = nil
+
+ ns = temp(nsrc.Type)
+ l = list(l, Nod(OAS, ns, nsrc)) // s = src
+
+ na = Nodintconst(int64(argc)) // const argc
+ nx = Nod(OIF, nil, nil) // if cap(s) - len(s) < argc
+ nx.Ntest = Nod(OLT, Nod(OSUB, Nod(OCAP, ns, nil), Nod(OLEN, ns, nil)), na)
+
+ fn = syslook("growslice", 1) // growslice(<type>, old []T, n int64) (ret []T)
+ argtype(fn, ns.Type.Type) // 1 old []any
+ argtype(fn, ns.Type.Type) // 2 ret []any
+
+ nx.Nbody = list1(Nod(OAS, ns, mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type), ns, conv(na, Types[TINT64]))))
+
+ l = list(l, nx)
+
+ nn = temp(Types[TINT])
+ l = list(l, Nod(OAS, nn, Nod(OLEN, ns, nil))) // n = len(s)
+
+ nx = Nod(OSLICE, ns, Nod(OKEY, nil, Nod(OADD, nn, na))) // ...s[:n+argc]
+ nx.Etype = 1
+ l = list(l, Nod(OAS, ns, nx)) // s = s[:n+argc]
+
+ for a = n.List.Next; a != nil; a = a.Next {
+ nx = Nod(OINDEX, ns, nn) // s[n] ...
+ nx.Bounded = 1
+ l = list(l, Nod(OAS, nx, a.N)) // s[n] = arg
+ if a.Next != nil {
+ l = list(l, Nod(OAS, nn, Nod(OADD, nn, Nodintconst(1)))) // n = n + 1
+ }
+ }
+
+ typechecklist(l, Etop)
+ walkstmtlist(l)
+ *init = concat(*init, l)
+ return ns
+}
+
+// Lower copy(a, b) to a memmove call or a runtime call.
+//
+// init {
+// n := len(a)
+// if n > len(b) { n = len(b) }
+// memmove(a.ptr, b.ptr, n*sizeof(elem(a)))
+// }
+// n;
+//
+// Also works if b is a string.
+//
+func copyany(n *Node, init **NodeList, runtimecall int) *Node {
+ var nl *Node
+ var nr *Node
+ var nfrm *Node
+ var nto *Node
+ var nif *Node
+ var nlen *Node
+ var nwid *Node
+ var fn *Node
+ var l *NodeList
+
+ if haspointers(n.Left.Type.Type) {
+ fn = writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
+ return mkcall1(fn, n.Type, init, typename(n.Left.Type.Type), n.Left, n.Right)
+ }
+
+ if runtimecall != 0 {
+ if n.Right.Type.Etype == TSTRING {
+ fn = syslook("slicestringcopy", 1)
+ } else {
+ fn = syslook("slicecopy", 1)
+ }
+ argtype(fn, n.Left.Type)
+ argtype(fn, n.Right.Type)
+ return mkcall1(fn, n.Type, init, n.Left, n.Right, Nodintconst(n.Left.Type.Type.Width))
+ }
+
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ nl = temp(n.Left.Type)
+ nr = temp(n.Right.Type)
+ l = nil
+ l = list(l, Nod(OAS, nl, n.Left))
+ l = list(l, Nod(OAS, nr, n.Right))
+
+ nfrm = Nod(OSPTR, nr, nil)
+ nto = Nod(OSPTR, nl, nil)
+
+ nlen = temp(Types[TINT])
+
+ // n = len(to)
+ l = list(l, Nod(OAS, nlen, Nod(OLEN, nl, nil)))
+
+ // if n > len(frm) { n = len(frm) }
+ nif = Nod(OIF, nil, nil)
+
+ nif.Ntest = Nod(OGT, nlen, Nod(OLEN, nr, nil))
+ nif.Nbody = list(nif.Nbody, Nod(OAS, nlen, Nod(OLEN, nr, nil)))
+ l = list(l, nif)
+
+ // Call memmove.
+ fn = syslook("memmove", 1)
+
+ argtype(fn, nl.Type.Type)
+ argtype(fn, nl.Type.Type)
+ nwid = temp(Types[TUINTPTR])
+ l = list(l, Nod(OAS, nwid, conv(nlen, Types[TUINTPTR])))
+ nwid = Nod(OMUL, nwid, Nodintconst(nl.Type.Type.Width))
+ l = list(l, mkcall1(fn, nil, init, nto, nfrm, nwid))
+
+ typechecklist(l, Etop)
+ walkstmtlist(l)
+ *init = concat(*init, l)
+ return nlen
+}
+
+// Generate frontend part for OSLICE[3][ARR|STR]
+//
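+// It emits the bounds checks and leaves the new cap, len, and byte
+// offset on n.List for the backend's cgen_slice.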
+func sliceany(n *Node, init **NodeList) *Node {
+ var bounded int
+ var slice3 int
+ var src *Node
+ var lb *Node
+ var hb *Node
+ var cb *Node
+ var bound *Node
+ var chk *Node
+ var chk0 *Node
+ var chk1 *Node
+ var chk2 *Node
+ var lbv int64
+ var hbv int64
+ var cbv int64
+ var bv int64
+ var w int64
+ var bt *Type
+
+ // print("before sliceany: %+N\n", n);
+
+ src = n.Left
+
+ lb = n.Right.Left
+ slice3 = bool2int(n.Op == OSLICE3 || n.Op == OSLICE3ARR)
+ if slice3 != 0 {
+ hb = n.Right.Right.Left
+ cb = n.Right.Right.Right
+ } else {
+ hb = n.Right.Right
+ cb = nil
+ }
+
+ bounded = int(n.Etype)
+
+ if n.Op == OSLICESTR {
+ bound = Nod(OLEN, src, nil)
+ } else {
+ bound = Nod(OCAP, src, nil)
+ }
+
+ typecheck(&bound, Erv)
+ walkexpr(&bound, init) // if src is an array, bound will be a const now.
+
+ // static checks if possible
+ bv = 1 << 50
+
+ if Isconst(bound, CTINT) != 0 {
+ if !(Smallintconst(bound) != 0) {
+ Yyerror("array len too large")
+ } else {
+ bv = Mpgetfix(bound.Val.U.Xval)
+ }
+ }
+
+ if Isconst(cb, CTINT) != 0 {
+ cbv = Mpgetfix(cb.Val.U.Xval)
+ if cbv < 0 || cbv > bv {
+ Yyerror("slice index out of bounds")
+ }
+ }
+
+ if Isconst(hb, CTINT) != 0 {
+ hbv = Mpgetfix(hb.Val.U.Xval)
+ if hbv < 0 || hbv > bv {
+ Yyerror("slice index out of bounds")
+ }
+ }
+
+ if Isconst(lb, CTINT) != 0 {
+ lbv = Mpgetfix(lb.Val.U.Xval)
+ if lbv < 0 || lbv > bv {
+ Yyerror("slice index out of bounds")
+ lbv = -1
+ }
+
+ if lbv == 0 {
+ lb = nil
+ }
+ }
+
+ // Checking src[lb:hb:cb] or src[lb:hb].
+ // if chk0 || chk1 || chk2 { panicslice() }
+ chk = nil
+
+ chk0 = nil // cap(src) < cb
+ chk1 = nil // cb < hb for src[lb:hb:cb]; cap(src) < hb for src[lb:hb]
+ chk2 = nil // hb < lb
+
+ // All comparisons are unsigned to avoid testing < 0.
+ bt = Types[Simtype[TUINT]]
+
+ if cb != nil && cb.Type.Width > 4 {
+ bt = Types[TUINT64]
+ }
+ if hb != nil && hb.Type.Width > 4 {
+ bt = Types[TUINT64]
+ }
+ if lb != nil && lb.Type.Width > 4 {
+ bt = Types[TUINT64]
+ }
+
+ bound = cheapexpr(conv(bound, bt), init)
+
+ if cb != nil {
+ cb = cheapexpr(conv(cb, bt), init)
+ if !(bounded != 0) {
+ chk0 = Nod(OLT, bound, cb)
+ }
+ } else if slice3 != 0 {
+ // When we figure out what this means, implement it.
+ Fatal("slice3 with cb == N") // rejected by parser
+ }
+
+ if hb != nil {
+ hb = cheapexpr(conv(hb, bt), init)
+ if !(bounded != 0) {
+ if cb != nil {
+ chk1 = Nod(OLT, cb, hb)
+ } else {
+ chk1 = Nod(OLT, bound, hb)
+ }
+ }
+ } else if slice3 != 0 {
+ // When we figure out what this means, implement it.
+ Fatal("slice3 with hb == N") // rejected by parser
+ } else if n.Op == OSLICEARR {
+ hb = bound
+ } else {
+ hb = Nod(OLEN, src, nil)
+ typecheck(&hb, Erv)
+ walkexpr(&hb, init)
+ hb = cheapexpr(conv(hb, bt), init)
+ }
+
+ if lb != nil {
+ lb = cheapexpr(conv(lb, bt), init)
+ if !(bounded != 0) {
+ chk2 = Nod(OLT, hb, lb)
+ }
+ }
+
+ if chk0 != nil || chk1 != nil || chk2 != nil {
+ chk = Nod(OIF, nil, nil)
+ chk.Nbody = list1(mkcall("panicslice", nil, init))
+ chk.Likely = -1
+ if chk0 != nil {
+ chk.Ntest = chk0
+ }
+ if chk1 != nil {
+ if chk.Ntest == nil {
+ chk.Ntest = chk1
+ } else {
+ chk.Ntest = Nod(OOROR, chk.Ntest, chk1)
+ }
+ }
+
+ if chk2 != nil {
+ if chk.Ntest == nil {
+ chk.Ntest = chk2
+ } else {
+ chk.Ntest = Nod(OOROR, chk.Ntest, chk2)
+ }
+ }
+
+ typecheck(&chk, Etop)
+ walkstmt(&chk)
+ *init = concat(*init, chk.Ninit)
+ chk.Ninit = nil
+ *init = list(*init, chk)
+ }
+
+ // prepare new cap, len and offs for backend cgen_slice
+ // cap = bound [ - lo ]
+ n.Right = nil
+
+ n.List = nil
+ if !(slice3 != 0) {
+ cb = bound
+ }
+ if lb == nil {
+ bound = conv(cb, Types[Simtype[TUINT]])
+ } else {
+ bound = Nod(OSUB, conv(cb, Types[Simtype[TUINT]]), conv(lb, Types[Simtype[TUINT]]))
+ }
+ typecheck(&bound, Erv)
+ walkexpr(&bound, init)
+ n.List = list(n.List, bound)
+
+ // len = hi [ - lo]
+ if lb == nil {
+ hb = conv(hb, Types[Simtype[TUINT]])
+ } else {
+ hb = Nod(OSUB, conv(hb, Types[Simtype[TUINT]]), conv(lb, Types[Simtype[TUINT]]))
+ }
+ typecheck(&hb, Erv)
+ walkexpr(&hb, init)
+ n.List = list(n.List, hb)
+
+ // offs = [width *] lo, but omit if zero
+ if lb != nil {
+ if n.Op == OSLICESTR {
+ w = 1
+ } else {
+ w = n.Type.Type.Width
+ }
+ lb = conv(lb, Types[TUINTPTR])
+ if w > 1 {
+ lb = Nod(OMUL, Nodintconst(w), lb)
+ }
+ typecheck(&lb, Erv)
+ walkexpr(&lb, init)
+ n.List = list(n.List, lb)
+ }
+
+ // print("after sliceany: %+N\n", n);
+
+ return n
+}
+
+func eqfor(t *Type, needsize *int) *Node {
+ var a int
+ var n *Node
+ var ntype *Node
+ var sym *Sym
+
+ // Should only arrive here with large memory or
+ // a struct/array containing a non-memory field/element.
+ // Small memory is handled inline, and single non-memory
+ // is handled during type check (OCMPSTR etc).
+ a = algtype1(t, nil)
+
+ if a != AMEM && a != -1 {
+ Fatal("eqfor %v", Tconv(t, 0))
+ }
+
+ if a == AMEM {
+ n = syslook("memequal", 1)
+ argtype(n, t)
+ argtype(n, t)
+ *needsize = 1
+ return n
+ }
+
+ sym = typesymprefix(".eq", t)
+ n = newname(sym)
+ n.Class = PFUNC
+ ntype = Nod(OTFUNC, nil, nil)
+ ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+ ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+ ntype.Rlist = list(ntype.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TBOOL])))
+ typecheck(&ntype, Etype)
+ n.Type = ntype.Type
+ *needsize = 0
+ return n
+}
+
+func countfield(t *Type) int {
+ var t1 *Type
+ var n int
+
+ n = 0
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ n++
+ }
+ return n
+}
+
+func walkcompare(np **Node, init **NodeList) {
+ var n *Node
+ var l *Node
+ var r *Node
+ var call *Node
+ var a *Node
+ var li *Node
+ var ri *Node
+ var expr *Node
+ var cmpl *Node
+ var cmpr *Node
+ var x *Node
+ var ok *Node
+ var andor int
+ var i int
+ var needsize int
+ var t *Type
+ var t1 *Type
+
+ n = *np
+
+ // Given interface value l and concrete value r, rewrite
+ // l == r
+ // to
+ // x, ok := l.(type(r)); ok && x == r
+ // Handle != similarly.
+ // This avoids the allocation that would be required
+ // to convert r to l for comparison.
+ l = nil
+
+ r = nil
+ if Isinter(n.Left.Type) != 0 && !(Isinter(n.Right.Type) != 0) {
+ l = n.Left
+ r = n.Right
+ } else if !(Isinter(n.Left.Type) != 0) && Isinter(n.Right.Type) != 0 {
+ l = n.Right
+ r = n.Left
+ }
+
+ if l != nil {
+ x = temp(r.Type)
+ ok = temp(Types[TBOOL])
+
+ // l.(type(r))
+ a = Nod(ODOTTYPE, l, nil)
+
+ a.Type = r.Type
+
+ // x, ok := l.(type(r))
+ expr = Nod(OAS2, nil, nil)
+
+ expr.List = list1(x)
+ expr.List = list(expr.List, ok)
+ expr.Rlist = list1(a)
+ typecheck(&expr, Etop)
+ walkexpr(&expr, init)
+
+ if n.Op == OEQ {
+ r = Nod(OANDAND, ok, Nod(OEQ, x, r))
+ } else {
+ r = Nod(OOROR, Nod(ONOT, ok, nil), Nod(ONE, x, r))
+ }
+ *init = list(*init, expr)
+ goto ret
+ }
+
+ // Must be comparison of array or struct.
+ // Otherwise back end handles it.
+ t = n.Left.Type
+
+ switch t.Etype {
+ default:
+ return
+
+ case TARRAY:
+ if Isslice(t) != 0 {
+ return
+ }
+
+ case TSTRUCT:
+ break
+ }
+
+ cmpl = n.Left
+ for cmpl != nil && cmpl.Op == OCONVNOP {
+ cmpl = cmpl.Left
+ }
+ cmpr = n.Right
+ for cmpr != nil && cmpr.Op == OCONVNOP {
+ cmpr = cmpr.Left
+ }
+
+ if !(islvalue(cmpl) != 0) || !(islvalue(cmpr) != 0) {
+ Fatal("arguments of comparison must be lvalues - %v %v", Nconv(cmpl, 0), Nconv(cmpr, 0))
+ }
+
+ l = temp(Ptrto(t))
+ a = Nod(OAS, l, Nod(OADDR, cmpl, nil))
+ a.Right.Etype = 1 // addr does not escape
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+
+ r = temp(Ptrto(t))
+ a = Nod(OAS, r, Nod(OADDR, cmpr, nil))
+ a.Right.Etype = 1 // addr does not escape
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+
+ expr = nil
+ andor = OANDAND
+ if n.Op == ONE {
+ andor = OOROR
+ }
+
+ if t.Etype == TARRAY && t.Bound <= 4 && issimple[t.Type.Etype] != 0 {
+ // Four or fewer elements of a basic type.
+ // Unroll comparisons.
+ for i = 0; int64(i) < t.Bound; i++ {
+ li = Nod(OINDEX, l, Nodintconst(int64(i)))
+ ri = Nod(OINDEX, r, Nodintconst(int64(i)))
+ a = Nod(int(n.Op), li, ri)
+ if expr == nil {
+ expr = a
+ } else {
+ expr = Nod(andor, expr, a)
+ }
+ }
+
+ if expr == nil {
+ expr = Nodbool(bool2int(n.Op == OEQ))
+ }
+ r = expr
+ goto ret
+ }
+
+ if t.Etype == TSTRUCT && countfield(t) <= 4 {
+ // Struct of four or fewer fields.
+ // Inline comparisons.
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ if isblanksym(t1.Sym) {
+ continue
+ }
+ li = Nod(OXDOT, l, newname(t1.Sym))
+ ri = Nod(OXDOT, r, newname(t1.Sym))
+ a = Nod(int(n.Op), li, ri)
+ if expr == nil {
+ expr = a
+ } else {
+ expr = Nod(andor, expr, a)
+ }
+ }
+
+ if expr == nil {
+ expr = Nodbool(bool2int(n.Op == OEQ))
+ }
+ r = expr
+ goto ret
+ }
+
+ // Chose not to inline. Call equality function directly.
+ call = Nod(OCALL, eqfor(t, &needsize), nil)
+
+ call.List = list(call.List, l)
+ call.List = list(call.List, r)
+ if needsize != 0 {
+ call.List = list(call.List, Nodintconst(t.Width))
+ }
+ r = call
+ if n.Op != OEQ {
+ r = Nod(ONOT, r, nil)
+ }
+ goto ret
+
+ret:
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ if r.Type != n.Type {
+ r = Nod(OCONVNOP, r, nil)
+ r.Type = n.Type
+ r.Typecheck = 1
+ }
+
+ *np = r
+ return
+}
+
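+// samecheap reports (as 0 or 1) whether a and b are the same simple,
+// side effect-free expression: the same name, or matching chains of
+// field selections and constant indexes on the same base.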
+func samecheap(a *Node, b *Node) int {
+ var ar *Node
+ var br *Node
+ for a != nil && b != nil && a.Op == b.Op {
+ switch a.Op {
+ default:
+ return 0
+
+ case ONAME:
+ return bool2int(a == b)
+
+ case ODOT,
+ ODOTPTR:
+ ar = a.Right
+ br = b.Right
+ if ar.Op != ONAME || br.Op != ONAME || ar.Sym != br.Sym {
+ return 0
+ }
+
+ case OINDEX:
+ ar = a.Right
+ br = b.Right
+ if !(Isconst(ar, CTINT) != 0) || !(Isconst(br, CTINT) != 0) || Mpcmpfixfix(ar.Val.U.Xval, br.Val.U.Xval) != 0 {
+ return 0
+ }
+ }
+
+ a = a.Left
+ b = b.Left
+ }
+
+ return 0
+}
+
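+// walkrotate rewrites shift pairs such as x<<c | x>>(w-c) (or the ^
+// form) on an unsigned w-bit value into a single left rotate (OLROT).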
+func walkrotate(np **Node) {
+ var w int
+ var sl int
+ var sr int
+ var s int
+ var l *Node
+ var r *Node
+ var n *Node
+
+ if Thearch.Thechar == '9' {
+ return
+ }
+
+ n = *np
+
+ // Want << | >> or >> | << or << ^ >> or >> ^ << on unsigned value.
+ l = n.Left
+
+ r = n.Right
+ if (n.Op != OOR && n.Op != OXOR) || (l.Op != OLSH && l.Op != ORSH) || (r.Op != OLSH && r.Op != ORSH) || n.Type == nil || Issigned[n.Type.Etype] != 0 || l.Op == r.Op {
+ return
+ }
+
+ // Want same, side effect-free expression on lhs of both shifts.
+ if !(samecheap(l.Left, r.Left) != 0) {
+ return
+ }
+
+ // Constants adding to width?
+ w = int(l.Type.Width * 8)
+
+ if Smallintconst(l.Right) != 0 && Smallintconst(r.Right) != 0 {
+ sl = int(Mpgetfix(l.Right.Val.U.Xval))
+ if sl >= 0 {
+ sr = int(Mpgetfix(r.Right.Val.U.Xval))
+ if sr >= 0 && sl+sr == w {
+ goto yes
+ }
+ }
+ return
+ }
+
+ // TODO: Could allow s and 32-s if s is bounded (maybe s&31 and 32-s&31).
+ return
+
+	// Rewrite the left-shift half of the pair to a left rotate.
+yes:
+ if l.Op == OLSH {
+ n = l
+ } else {
+ n = r
+ }
+ n.Op = OLROT
+
+ // Remove rotate 0 and rotate w.
+ s = int(Mpgetfix(n.Right.Val.U.Xval))
+
+ if s == 0 || s == w {
+ n = n.Left
+ }
+
+ *np = n
+ return
+}
+
+/*
+ * walkmul rewrites integer multiplication by powers of two as shifts.
+ */
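+// e.g. x*8 becomes x<<3 and x*(-8) becomes -(x<<3); x*0 and x*1 are
+// reduced to 0 and x respectively.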
+func walkmul(np **Node, init **NodeList) {
+ var n *Node
+ var nl *Node
+ var nr *Node
+ var pow int
+ var neg int
+ var w int
+
+ n = *np
+ if !(Isint[n.Type.Etype] != 0) {
+ return
+ }
+
+ if n.Right.Op == OLITERAL {
+ nl = n.Left
+ nr = n.Right
+ } else if n.Left.Op == OLITERAL {
+ nl = n.Right
+ nr = n.Left
+ } else {
+ return
+ }
+
+ neg = 0
+
+ // x*0 is 0 (and side effects of x).
+ if Mpgetfix(nr.Val.U.Xval) == 0 {
+ cheapexpr(nl, init)
+ Nodconst(n, n.Type, 0)
+ goto ret
+ }
+
+ // nr is a constant.
+ pow = powtwo(nr)
+
+ if pow < 0 {
+ return
+ }
+ if pow >= 1000 {
+ // negative power of 2, like -16
+ neg = 1
+
+ pow -= 1000
+ }
+
+ w = int(nl.Type.Width * 8)
+ if pow+1 >= w { // too big, shouldn't happen
+ return
+ }
+
+ nl = cheapexpr(nl, init)
+
+ if pow == 0 {
+ // x*1 is x
+ n = nl
+
+ goto ret
+ }
+
+ n = Nod(OLSH, nl, Nodintconst(int64(pow)))
+
+ret:
+ if neg != 0 {
+ n = Nod(OMINUS, n, nil)
+ }
+
+ typecheck(&n, Erv)
+ walkexpr(&n, init)
+ *np = n
+}
+
+/*
+ * walkdiv rewrites division by a constant as less expensive
+ * operations.
+ */
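+// e.g. x/16 becomes a shift (with a rounding adjustment when x is
+// signed), x%16 becomes a mask (adjusted for negative x), and other
+// constant divisors use the magic-multiply method of Hacker's Delight
+// chapter 10.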
+func walkdiv(np **Node, init **NodeList) {
+ var n *Node
+ var nl *Node
+ var nr *Node
+	var nc *Node
+	var n1 *Node
+	var n2 *Node
+	var n3 *Node
+	var n4 *Node
+	var pow int // if >= 0, nr is 1<<pow
+	var s int   // 1 if nr is negative.
+ var w int
+ var twide *Type
+ var m Magic
+
+ // TODO(minux)
+ if Thearch.Thechar == '9' {
+ return
+ }
+
+ n = *np
+ if n.Right.Op != OLITERAL {
+ return
+ }
+
+ // nr is a constant.
+ nl = cheapexpr(n.Left, init)
+
+ nr = n.Right
+
+ // special cases of mod/div
+ // by a constant
+ w = int(nl.Type.Width * 8)
+
+ s = 0
+ pow = powtwo(nr)
+ if pow >= 1000 {
+ // negative power of 2
+ s = 1
+
+ pow -= 1000
+ }
+
+ if pow+1 >= w {
+ // divisor too large.
+ return
+ }
+
+ if pow < 0 {
+ goto divbymul
+ }
+
+ switch pow {
+ case 0:
+ if n.Op == OMOD {
+ // nl % 1 is zero.
+ Nodconst(n, n.Type, 0)
+ } else if s != 0 {
+ // divide by -1
+ n.Op = OMINUS
+
+ n.Right = nil
+ } else {
+ // divide by 1
+ n = nl
+ }
+
+ default:
+ if Issigned[n.Type.Etype] != 0 {
+ if n.Op == OMOD {
+ // signed modulo 2^pow is like ANDing
+ // with the last pow bits, but if nl < 0,
+ // nl & (2^pow-1) is (nl+1)%2^pow - 1.
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[Simtype[TUINT]], int64(w)-1)
+ n1 = Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
+ if pow == 1 {
+ typecheck(&n1, Erv)
+ n1 = cheapexpr(n1, init)
+
+ // n = (nl+ε)&1 -ε where ε=1 iff nl<0.
+ n2 = Nod(OSUB, nl, n1)
+
+ nc = Nod(OXXX, nil, nil)
+ Nodconst(nc, nl.Type, 1)
+ n3 = Nod(OAND, n2, nc)
+ n = Nod(OADD, n3, n1)
+ } else {
+ // n = (nl+ε)&(nr-1) - ε where ε=2^pow-1 iff nl<0.
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, nl.Type, (1<<uint(pow))-1)
+ n2 = Nod(OAND, n1, nc) // n2 = 2^pow-1 iff nl<0.
+ typecheck(&n2, Erv)
+ n2 = cheapexpr(n2, init)
+
+ n3 = Nod(OADD, nl, n2)
+ n4 = Nod(OAND, n3, nc)
+ n = Nod(OSUB, n4, n2)
+ }
+
+ break
+ } else {
+ // arithmetic right shift does not give the correct rounding.
+ // if nl >= 0, nl >> n == nl / nr
+ // if nl < 0, we want to add 2^n-1 first.
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[Simtype[TUINT]], int64(w)-1)
+ n1 = Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
+ if pow == 1 {
+ // nl+1 is nl-(-1)
+ n.Left = Nod(OSUB, nl, n1)
+ } else {
+				// Do a logical right shift on -1 to keep pow bits.
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[Simtype[TUINT]], int64(w)-int64(pow))
+ n2 = Nod(ORSH, conv(n1, tounsigned(nl.Type)), nc)
+ n.Left = Nod(OADD, nl, conv(n2, nl.Type))
+ }
+
+ // n = (nl + 2^pow-1) >> pow
+ n.Op = ORSH
+
+ nc = Nod(OXXX, nil, nil)
+ Nodconst(nc, Types[Simtype[TUINT]], int64(pow))
+ n.Right = nc
+ n.Typecheck = 0
+ }
+
+ if s != 0 {
+ n = Nod(OMINUS, n, nil)
+ }
+ break
+ }
+
+ nc = Nod(OXXX, nil, nil)
+ if n.Op == OMOD {
+ // n = nl & (nr-1)
+ n.Op = OAND
+
+ Nodconst(nc, nl.Type, Mpgetfix(nr.Val.U.Xval)-1)
+ } else {
+ // n = nl >> pow
+ n.Op = ORSH
+
+ Nodconst(nc, Types[Simtype[TUINT]], int64(pow))
+ }
+
+ n.Typecheck = 0
+ n.Right = nc
+ }
+
+ goto ret
+
+ // try to do division by multiply by (2^w)/d
+ // see hacker's delight chapter 10
+ // TODO: support 64-bit magic multiply here.
+divbymul:
+ m.W = w
+
+ if Issigned[nl.Type.Etype] != 0 {
+ m.Sd = Mpgetfix(nr.Val.U.Xval)
+ Smagic(&m)
+ } else {
+ m.Ud = uint64(Mpgetfix(nr.Val.U.Xval))
+ Umagic(&m)
+ }
+
+ if m.Bad != 0 {
+ return
+ }
+
+ // We have a quick division method so use it
+ // for modulo too.
+ if n.Op == OMOD {
+ goto longmod
+ }
+
+ switch Simtype[nl.Type.Etype] {
+ default:
+ return
+
+ // n1 = nl * magic >> w (HMUL)
+ case TUINT8,
+ TUINT16,
+ TUINT32:
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, nl.Type, int64(m.Um))
+ n1 = Nod(OMUL, nl, nc)
+ typecheck(&n1, Erv)
+ n1.Op = OHMUL
+ if m.Ua != 0 {
+ // Select a Go type with (at least) twice the width.
+ switch Simtype[nl.Type.Etype] {
+ default:
+ return
+
+ case TUINT8,
+ TUINT16:
+ twide = Types[TUINT32]
+
+ case TUINT32:
+ twide = Types[TUINT64]
+
+ case TINT8,
+ TINT16:
+ twide = Types[TINT32]
+
+ case TINT32:
+ twide = Types[TINT64]
+ }
+
+ // add numerator (might overflow).
+ // n2 = (n1 + nl)
+ n2 = Nod(OADD, conv(n1, twide), conv(nl, twide))
+
+ // shift by m.s
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n = conv(Nod(ORSH, n2, nc), nl.Type)
+ } else {
+ // n = n1 >> m.s
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n = Nod(ORSH, n1, nc)
+ }
+
+ // n1 = nl * magic >> w
+ case TINT8,
+ TINT16,
+ TINT32:
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, nl.Type, m.Sm)
+ n1 = Nod(OMUL, nl, nc)
+ typecheck(&n1, Erv)
+ n1.Op = OHMUL
+ if m.Sm < 0 {
+ // add the numerator.
+ n1 = Nod(OADD, n1, nl)
+ }
+
+ // shift by m.s
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n2 = conv(Nod(ORSH, n1, nc), nl.Type)
+
+		// add 1 iff nl is negative.
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(w)-1)
+		n3 = Nod(ORSH, nl, nc) // n3 = -1 iff nl is negative.
+ n = Nod(OSUB, n2, n3)
+
+ // apply sign.
+ if m.Sd < 0 {
+ n = Nod(OMINUS, n, nil)
+ }
+ }
+
+ goto ret
+
+ // rewrite as A%B = A - (A/B*B).
+longmod:
+ n1 = Nod(ODIV, nl, nr)
+
+ n2 = Nod(OMUL, n1, nr)
+ n = Nod(OSUB, nl, n2)
+ goto ret
+
+ret:
+ typecheck(&n, Erv)
+ walkexpr(&n, init)
+ *np = n
+}
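
Both strategies above are easy to spot-check from ordinary Go. Below is a minimal sketch (illustrative only, not part of this patch; div8, mod8, div7, and the divisor choices are invented for the example). It reproduces the signed power-of-two sequences for nl/8 and nl%8 with w=64, pow=3, and the unsigned magic-multiply division by 7 with w=32, using the standard Hacker's Delight constant M=0x24924925 with s=3 and the "add the numerator" fixup; the longmod fallback is just the identity a%b == a - a/b*b.

	package main

	import "fmt"

	// div8/mod8 mirror the signed power-of-two sequences (w=64, pow=3).
	func div8(nl int64) int64 {
		n1 := nl >> 63                      // -1 iff nl < 0
		n2 := int64(uint64(n1) >> (64 - 3)) // 2^pow-1 iff nl < 0 (logical shift keeps pow bits)
		return (nl + n2) >> 3               // now the arithmetic shift rounds toward zero
	}

	func mod8(nl int64) int64 {
		eps := (nl >> 63) & 7 // ε = 2^pow-1 iff nl < 0
		return ((nl + eps) & 7) - eps
	}

	// div7 mirrors the unsigned magic-multiply path (w=32): multiply,
	// take the high word (the OHMUL above), add the numerator in a wider
	// type so it cannot overflow, then shift.
	func div7(n uint32) uint32 {
		t := uint32((uint64(n) * 0x24924925) >> 32)
		return uint32((uint64(t) + uint64(n)) >> 3)
	}

	func main() {
		for nl := int64(-64); nl <= 64; nl++ {
			if div8(nl) != nl/8 || mod8(nl) != nl%8 {
				fmt.Println("mismatch at", nl)
			}
		}
		for n := uint32(0); n < 1<<20; n++ {
			if div7(n) != n/7 {
				fmt.Println("mismatch at", n)
			}
		}
		fmt.Println("ok")
	}
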
+
+// return 1 if integer n must be in range [0, max), 0 otherwise
+func bounded(n *Node, max int64) int {
+ var v int64
+ var bits int32
+ var sign int
+
+ if n.Type == nil || !(Isint[n.Type.Etype] != 0) {
+ return 0
+ }
+
+ sign = int(Issigned[n.Type.Etype])
+ bits = int32(8 * n.Type.Width)
+
+ if Smallintconst(n) != 0 {
+ v = Mpgetfix(n.Val.U.Xval)
+ return bool2int(0 <= v && v < max)
+ }
+
+ switch n.Op {
+ case OAND:
+ v = -1
+ if Smallintconst(n.Left) != 0 {
+ v = Mpgetfix(n.Left.Val.U.Xval)
+ } else if Smallintconst(n.Right) != 0 {
+ v = Mpgetfix(n.Right.Val.U.Xval)
+ }
+
+ if 0 <= v && v < max {
+ return 1
+ }
+
+ case OMOD:
+ if !(sign != 0) && Smallintconst(n.Right) != 0 {
+ v = Mpgetfix(n.Right.Val.U.Xval)
+ if 0 <= v && v <= max {
+ return 1
+ }
+ }
+
+ case ODIV:
+ if !(sign != 0) && Smallintconst(n.Right) != 0 {
+ v = Mpgetfix(n.Right.Val.U.Xval)
+ for bits > 0 && v >= 2 {
+ bits--
+ v >>= 1
+ }
+ }
+
+ case ORSH:
+ if !(sign != 0) && Smallintconst(n.Right) != 0 {
+ v = Mpgetfix(n.Right.Val.U.Xval)
+ if v > int64(bits) {
+ return 1
+ }
+ bits -= int32(v)
+ }
+ }
+
+ if !(sign != 0) && bits <= 62 && 1<<uint(bits) <= max {
+ return 1
+ }
+
+ return 0
+}
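
The per-op facts that bounded encodes can be checked directly; a small illustrative sketch (the variable x is an assumption):

	package main

	import "fmt"

	func main() {
		var x uint32 = 0xdeadbeef
		fmt.Println(x&15 < 16)         // OAND: masking with v bounds the result by v+1
		fmt.Println(x%10 < 10)         // OMOD: an unsigned remainder is below the divisor
		fmt.Println(x/(1<<20) < 1<<12) // ODIV: dividing by 2^20 strips 20 of the 32 bits
		fmt.Println(x>>24 < 1<<8)      // ORSH: shifting right by 24 leaves 8 bits
	}
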
+
+func usefield(n *Node) {
+ var field *Type
+ var l *Type
+
+ if !(obj.Fieldtrack_enabled != 0) {
+ return
+ }
+
+ switch n.Op {
+ default:
+ Fatal("usefield %v", Oconv(int(n.Op), 0))
+ fallthrough
+
+ case ODOT,
+ ODOTPTR:
+ break
+ }
+
+ field = n.Paramfld
+ if field == nil {
+ Fatal("usefield %v %v without paramfld", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, 0))
+ }
+ if field.Note == nil || !strings.Contains(field.Note.S, "go:\"track\"") {
+ return
+ }
+
+ // dedup on list
+ if field.Lastfn == Curfn {
+ return
+ }
+ field.Lastfn = Curfn
+ field.Outer = n.Left.Type
+ if Isptr[field.Outer.Etype] != 0 {
+ field.Outer = field.Outer.Type
+ }
+ if field.Outer.Sym == nil {
+ Yyerror("tracked field must be in named struct type")
+ }
+ if !exportname(field.Sym.Name) {
+ Yyerror("tracked field must be exported (upper case)")
+ }
+
+ l = typ(0)
+ l.Type = field
+ l.Down = Curfn.Paramfld
+ Curfn.Paramfld = l
+}
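
For reference, the go:"track" note comes from an ordinary struct tag. A hypothetical declaration that would satisfy the checks above (a named struct type with an exported field; the names Event and Payload are invented):

	type Event struct {
		Payload []byte `go:"track"` // usefield records each function touching Event.Payload
	}
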
+
+func candiscardlist(l *NodeList) int {
+ for ; l != nil; l = l.Next {
+ if !(candiscard(l.N) != 0) {
+ return 0
+ }
+ }
+ return 1
+}
+
+func candiscard(n *Node) int {
+ if n == nil {
+ return 1
+ }
+
+ switch n.Op {
+ default:
+ return 0
+
+ // Discardable as long as the subpieces are.
+ case ONAME,
+ ONONAME,
+ OTYPE,
+ OPACK,
+ OLITERAL,
+ OADD,
+ OSUB,
+ OOR,
+ OXOR,
+ OADDSTR,
+ OADDR,
+ OANDAND,
+ OARRAYBYTESTR,
+ OARRAYRUNESTR,
+ OSTRARRAYBYTE,
+ OSTRARRAYRUNE,
+ OCAP,
+ OCMPIFACE,
+ OCMPSTR,
+ OCOMPLIT,
+ OMAPLIT,
+ OSTRUCTLIT,
+ OARRAYLIT,
+ OPTRLIT,
+ OCONV,
+ OCONVIFACE,
+ OCONVNOP,
+ ODOT,
+ OEQ,
+ ONE,
+ OLT,
+ OLE,
+ OGT,
+ OGE,
+ OKEY,
+ OLEN,
+ OMUL,
+ OLSH,
+ ORSH,
+ OAND,
+ OANDNOT,
+ ONEW,
+ ONOT,
+ OCOM,
+ OPLUS,
+ OMINUS,
+ OOROR,
+ OPAREN,
+ ORUNESTR,
+ OREAL,
+ OIMAG,
+ OCOMPLEX:
+ break
+
+ // Discardable as long as we know it's not division by zero.
+ case ODIV,
+ OMOD:
+ if Isconst(n.Right, CTINT) != 0 && mpcmpfixc(n.Right.Val.U.Xval, 0) != 0 {
+ break
+ }
+ if Isconst(n.Right, CTFLT) != 0 && mpcmpfltc(n.Right.Val.U.Fval, 0) != 0 {
+ break
+ }
+ return 0
+
+ // Discardable as long as we know it won't fail because of a bad size.
+ case OMAKECHAN,
+ OMAKEMAP:
+ if Isconst(n.Left, CTINT) != 0 && mpcmpfixc(n.Left.Val.U.Xval, 0) == 0 {
+ break
+ }
+ return 0
+
+ // Difficult to tell what sizes are okay.
+ case OMAKESLICE:
+ return 0
+ }
+
+ if !(candiscard(n.Left) != 0) || !(candiscard(n.Right) != 0) || !(candiscard(n.Ntest) != 0) || !(candiscard(n.Nincr) != 0) || !(candiscardlist(n.Ninit) != 0) || !(candiscardlist(n.Nbody) != 0) || !(candiscardlist(n.Nelse) != 0) || !(candiscardlist(n.List) != 0) || !(candiscardlist(n.Rlist) != 0) {
+ return 0
+ }
+
+ return 1
+}
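
Illustrative instances of these rules (x, y, and n are assumed non-constant locals):

	x + y             // discardable: arithmetic whose subpieces are discardable
	x / 2             // discardable: the divisor is a nonzero constant
	x / y             // not discardable: y could be zero at run time
	make(chan int, 0) // discardable: the size is the constant 0
	make([]int, n)    // not discardable: difficult to tell what sizes are okay
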
+
+// rewrite
+// print(x, y, z)
+// into
+// func(a1, a2, a3) {
+// print(a1, a2, a3)
+// }(x, y, z)
+// and same for println.
+
+var walkprintfunc_prgen int
+
+func walkprintfunc(np **Node, init **NodeList) {
+ var n *Node
+ var a *Node
+ var fn *Node
+ var t *Node
+ var oldfn *Node
+ var l *NodeList
+ var printargs *NodeList
+ var num int
+ var buf string
+
+ n = *np
+
+ if n.Ninit != nil {
+ walkstmtlist(n.Ninit)
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+ }
+
+ t = Nod(OTFUNC, nil, nil)
+ num = 0
+ printargs = nil
+ for l = n.List; l != nil; l = l.Next {
+ buf = fmt.Sprintf("a%d", num)
+ num++
+ a = Nod(ODCLFIELD, newname(Lookup(buf)), typenod(l.N.Type))
+ t.List = list(t.List, a)
+ printargs = list(printargs, a.Left)
+ }
+
+ fn = Nod(ODCLFUNC, nil, nil)
+ walkprintfunc_prgen++
+ buf = fmt.Sprintf("print·%d", walkprintfunc_prgen)
+ fn.Nname = newname(Lookup(buf))
+ fn.Nname.Defn = fn
+ fn.Nname.Ntype = t
+ declare(fn.Nname, PFUNC)
+
+ oldfn = Curfn
+ Curfn = nil
+ funchdr(fn)
+
+ a = Nod(int(n.Op), nil, nil)
+ a.List = printargs
+ typecheck(&a, Etop)
+ walkstmt(&a)
+
+ fn.Nbody = list1(a)
+
+ funcbody(fn)
+
+ typecheck(&fn, Etop)
+ typechecklist(fn.Nbody, Etop)
+ xtop = list(xtop, fn)
+ Curfn = oldfn
+
+ a = Nod(OCALL, nil, nil)
+ a.Left = fn.Nname
+ a.List = n.List
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ *np = a
+}
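
Schematically, after this pass a statement like print(x, y) behaves as if the source had read the following (print·1 is a compiler-internal name following the print·N pattern above and cannot be written in ordinary source; T0 and T1 stand for whatever types x and y have):

	func print·1(a0 T0, a1 T1) {
		print(a0, a1)
	}
	print·1(x, y)
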
diff --git a/src/cmd/internal/gc/y.go b/src/cmd/internal/gc/y.go
new file mode 100644
index 0000000000..0e9157cf0d
--- /dev/null
+++ b/src/cmd/internal/gc/y.go
@@ -0,0 +1,3524 @@
+//line go.y:21
+package gc
+
+import __yyfmt__ "fmt"
+
+//line go.y:21
+import (
+ "strings"
+)
+
+//line go.y:27
+type yySymType struct {
+ yys int
+ node *Node
+ list *NodeList
+ typ *Type
+ sym *Sym
+ val Val
+ i int
+}
+
+const LLITERAL = 57346
+const LASOP = 57347
+const LCOLAS = 57348
+const LBREAK = 57349
+const LCASE = 57350
+const LCHAN = 57351
+const LCONST = 57352
+const LCONTINUE = 57353
+const LDDD = 57354
+const LDEFAULT = 57355
+const LDEFER = 57356
+const LELSE = 57357
+const LFALL = 57358
+const LFOR = 57359
+const LFUNC = 57360
+const LGO = 57361
+const LGOTO = 57362
+const LIF = 57363
+const LIMPORT = 57364
+const LINTERFACE = 57365
+const LMAP = 57366
+const LNAME = 57367
+const LPACKAGE = 57368
+const LRANGE = 57369
+const LRETURN = 57370
+const LSELECT = 57371
+const LSTRUCT = 57372
+const LSWITCH = 57373
+const LTYPE = 57374
+const LVAR = 57375
+const LANDAND = 57376
+const LANDNOT = 57377
+const LBODY = 57378
+const LCOMM = 57379
+const LDEC = 57380
+const LEQ = 57381
+const LGE = 57382
+const LGT = 57383
+const LIGNORE = 57384
+const LINC = 57385
+const LLE = 57386
+const LLSH = 57387
+const LLT = 57388
+const LNE = 57389
+const LOROR = 57390
+const LRSH = 57391
+const NotPackage = 57392
+const NotParen = 57393
+const PreferToRightParen = 57394
+
+var yyToknames = []string{
+ "LLITERAL",
+ "LASOP",
+ "LCOLAS",
+ "LBREAK",
+ "LCASE",
+ "LCHAN",
+ "LCONST",
+ "LCONTINUE",
+ "LDDD",
+ "LDEFAULT",
+ "LDEFER",
+ "LELSE",
+ "LFALL",
+ "LFOR",
+ "LFUNC",
+ "LGO",
+ "LGOTO",
+ "LIF",
+ "LIMPORT",
+ "LINTERFACE",
+ "LMAP",
+ "LNAME",
+ "LPACKAGE",
+ "LRANGE",
+ "LRETURN",
+ "LSELECT",
+ "LSTRUCT",
+ "LSWITCH",
+ "LTYPE",
+ "LVAR",
+ "LANDAND",
+ "LANDNOT",
+ "LBODY",
+ "LCOMM",
+ "LDEC",
+ "LEQ",
+ "LGE",
+ "LGT",
+ "LIGNORE",
+ "LINC",
+ "LLE",
+ "LLSH",
+ "LLT",
+ "LNE",
+ "LOROR",
+ "LRSH",
+ "'+'",
+ "'-'",
+ "'|'",
+ "'^'",
+ "'*'",
+ "'/'",
+ "'%'",
+ "'&'",
+ "NotPackage",
+ "NotParen",
+ "'('",
+ "')'",
+ "PreferToRightParen",
+}
+var yyStatenames = []string{}
+
+const yyEofCode = 1
+const yyErrCode = 2
+const yyMaxDepth = 200
+
+//line go.y:2242
+func fixlbrace(lbr int) {
+ // If the opening brace was an LBODY,
+ // set up for another one now that we're done.
+	// See comment in lex.go about loophack.
+ if lbr == LBODY {
+ loophack = 1
+ }
+}
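
The LBODY/loophack machinery is what makes composite literals inside if/for/switch headers need parentheses: after the header keyword the lexer returns '{' as LBODY (the start of the statement body) rather than as the start of a literal. A small illustrative program showing the visible consequence:

	package main

	import "fmt"

	type Point struct{ X, Y int }

	func main() {
		p := Point{1, 2}
		// Without the parentheses, the '{' after "Point" would be taken
		// as the opening of the if body, so the literal must be wrapped.
		if p == (Point{1, 2}) {
			fmt.Println("equal")
		}
	}
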
+
+//line yacctab:1
+var yyExca = []int{
+ -1, 1,
+ 1, -1,
+ -2, 0,
+ -1, 17,
+ 1, 1,
+ 63, 23,
+ -2, 0,
+ -1, 48,
+ 6, 276,
+ 66, 276,
+ 76, 276,
+ -2, 49,
+ -1, 56,
+ 67, 153,
+ -2, 162,
+ -1, 74,
+ 60, 181,
+ -2, 215,
+ -1, 75,
+ 60, 182,
+ -2, 183,
+ -1, 121,
+ 60, 134,
+ 64, 134,
+ 68, 134,
+ 72, 134,
+ -2, 266,
+ -1, 125,
+ 60, 134,
+ 64, 134,
+ 68, 134,
+ 72, 134,
+ -2, 267,
+ -1, 176,
+ 2, 215,
+ 36, 215,
+ 60, 181,
+ 68, 215,
+ -2, 173,
+ -1, 177,
+ 36, 183,
+ 60, 182,
+ 68, 183,
+ -2, 174,
+ -1, 184,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 242,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 252,
+ 8, 251,
+ 13, 251,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 325,
+ 4, 236,
+ 63, 236,
+ 69, 236,
+ -2, 157,
+ -1, 407,
+ 36, 176,
+ 60, 176,
+ 68, 176,
+ -2, 167,
+ -1, 408,
+ 36, 177,
+ 60, 177,
+ 68, 177,
+ -2, 168,
+ -1, 409,
+ 36, 178,
+ 60, 178,
+ 68, 178,
+ -2, 169,
+ -1, 410,
+ 36, 179,
+ 60, 179,
+ 68, 179,
+ -2, 170,
+ -1, 416,
+ 8, 251,
+ 13, 251,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 417,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 497,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 552,
+ 60, 157,
+ -2, 318,
+ -1, 553,
+ 60, 158,
+ -2, 317,
+ -1, 578,
+ 8, 251,
+ 13, 251,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 592,
+ 36, 180,
+ 60, 180,
+ 68, 180,
+ -2, 171,
+}
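
yyExca is the exception table consulted when yyDef yields -2; its layout matches the scan loop in yyParse further below. A minimal decoder, assuming that same encoding (illustrative only; excaLookup is not part of the generated file):

	// Blocks start with the pair (-1, state); then (lookahead, rule)
	// pairs follow, and a negative lookahead supplies the default rule.
	func excaLookup(exca []int, state, token int) int {
		xi := 0
		for !(exca[xi] == -1 && exca[xi+1] == state) {
			xi += 2
		}
		for xi += 2; ; xi += 2 {
			if n := exca[xi]; n < 0 || n == token {
				return exca[xi+1]
			}
		}
	}
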
+
+const yyNprod = 352
+const yyPrivate = 57344
+
+var yyTokenNames []string
+var yyStates []string
+
+const yyLast = 2282
+
+var yyAct = []int{
+
+ 74, 381, 304, 285, 291, 486, 610, 398, 545, 478,
+ 549, 296, 186, 75, 400, 229, 302, 401, 103, 389,
+ 458, 356, 290, 318, 457, 34, 303, 338, 230, 245,
+ 466, 109, 339, 101, 337, 332, 85, 104, 374, 248,
+ 246, 174, 467, 286, 14, 324, 479, 328, 241, 212,
+ 108, 6, 325, 155, 243, 469, 226, 181, 468, 516,
+ 413, 320, 373, 392, 325, 219, 13, 208, 176, 10,
+ 11, 584, 172, 469, 651, 385, 599, 583, 106, 191,
+ 13, 177, 460, 541, 422, 160, 310, 331, 613, 161,
+ 309, 446, 192, 322, 193, 626, 327, 162, 198, 321,
+ 88, 12, 13, 10, 227, 238, 662, 194, 317, 227,
+ 632, 448, 227, 12, 13, 227, 209, 228, 12, 13,
+ 447, 10, 228, 203, 175, 228, 108, 393, 228, 461,
+ 54, 660, 205, 445, 184, 384, 222, 460, 459, 204,
+ 199, 200, 239, 88, 506, 155, 214, 216, 218, 507,
+ 427, 631, 12, 13, 233, 625, 624, 202, 10, 88,
+ 90, 176, 55, 288, 10, 627, 213, 213, 213, 213,
+ 12, 13, 118, 118, 177, 295, 126, 154, 308, 176,
+ 10, 416, 282, 282, 461, 282, 603, 620, 416, 10,
+ 600, 227, 177, 301, 593, 416, 227, 227, 404, 227,
+ 280, 484, 444, 90, 228, 622, 536, 12, 13, 228,
+ 228, 506, 228, 12, 13, 86, 507, 175, 527, 90,
+ 298, 163, 164, 165, 166, 167, 168, 169, 170, 12,
+ 13, 523, 227, 580, 515, 175, 182, 153, 12, 13,
+ 242, 171, 325, 397, 416, 228, 330, 155, 227, 334,
+ 415, 227, 227, 116, 227, 185, 358, 367, 463, 371,
+ 360, 228, 355, 362, 228, 228, 353, 228, 183, 365,
+ 210, 322, 504, 369, 434, 314, 68, 321, 91, 379,
+ 614, 78, 416, 340, 609, 340, 340, 376, 375, 182,
+ 124, 12, 13, 604, 176, 83, 79, 10, 394, 325,
+ 407, 336, 82, 351, 352, 10, 378, 177, 380, 414,
+ 227, 227, 601, 408, 574, 409, 608, 10, 568, 558,
+ 227, 183, 48, 228, 228, 465, 10, 464, 410, 391,
+ 323, 329, 67, 228, 331, 348, 443, 656, 442, 412,
+ 293, 163, 170, 605, 77, 436, 12, 13, 12, 13,
+ 175, 424, 423, 234, 12, 13, 388, 383, 370, 366,
+ 359, 114, 435, 333, 655, 227, 12, 13, 100, 129,
+ 441, 99, 10, 490, 227, 12, 13, 439, 228, 84,
+ 454, 20, 453, 429, 432, 480, 491, 228, 492, 654,
+ 173, 10, 508, 473, 176, 10, 653, 645, 511, 619,
+ 188, 493, 483, 494, 616, 607, 227, 177, 221, 282,
+ 514, 606, 227, 597, 282, 519, 520, 340, 340, 228,
+ 596, 12, 13, 227, 595, 228, 110, 498, 340, 489,
+ 107, 510, 502, 592, 525, 449, 228, 582, 517, 227,
+ 12, 13, 562, 524, 12, 13, 470, 539, 528, 531,
+ 175, 522, 228, 254, 513, 512, 255, 256, 257, 258,
+ 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 129, 129, 277, 554,
+ 559, 227, 330, 173, 537, 294, 509, 557, 561, 227,
+ 69, 564, 532, 538, 228, 534, 490, 490, 496, 495,
+ 482, 572, 228, 300, 476, 475, 472, 176, 440, 491,
+ 491, 492, 492, 567, 573, 340, 10, 340, 420, 553,
+ 177, 577, 372, 340, 493, 493, 340, 590, 591, 579,
+ 297, 585, 570, 540, 586, 542, 456, 551, 431, 438,
+ 249, 555, 340, 571, 556, 354, 253, 129, 251, 180,
+ 431, 102, 489, 489, 382, 323, 501, 530, 287, 129,
+ 566, 117, 7, 175, 70, 12, 13, 329, 5, 197,
+ 431, 227, 211, 433, 24, 16, 529, 19, 617, 430,
+ 650, 455, 364, 533, 228, 428, 560, 480, 305, 335,
+ 207, 206, 21, 93, 197, 623, 252, 629, 490, 197,
+ 282, 630, 197, 635, 120, 197, 26, 386, 121, 125,
+ 637, 491, 340, 492, 641, 639, 173, 340, 621, 402,
+ 57, 565, 306, 76, 402, 618, 493, 158, 176, 642,
+ 598, 387, 511, 340, 157, 602, 640, 665, 652, 581,
+ 28, 177, 390, 643, 223, 644, 490, 159, 156, 235,
+ 96, 657, 240, 661, 489, 497, 578, 417, 98, 491,
+ 663, 492, 94, 664, 122, 122, 31, 22, 667, 666,
+ 340, 15, 97, 95, 493, 553, 23, 201, 340, 49,
+ 18, 197, 594, 129, 175, 3, 197, 197, 636, 197,
+ 129, 282, 8, 551, 4, 2, 1, 450, 215, 543,
+ 544, 547, 489, 548, 611, 92, 487, 129, 129, 189,
+ 80, 81, 437, 72, 71, 237, 173, 615, 477, 316,
+ 188, 220, 197, 326, 340, 244, 128, 340, 648, 628,
+ 649, 311, 127, 17, 399, 319, 312, 313, 197, 315,
+ 25, 197, 197, 27, 197, 36, 633, 634, 78, 37,
+ 281, 66, 111, 638, 39, 38, 35, 124, 279, 278,
+ 73, 217, 83, 79, 10, 113, 587, 149, 503, 82,
+ 505, 87, 363, 0, 123, 0, 232, 150, 0, 0,
+ 9, 151, 141, 142, 143, 144, 145, 146, 147, 148,
+ 197, 377, 56, 196, 89, 0, 0, 0, 0, 231,
+ 197, 197, 0, 0, 0, 105, 105, 112, 115, 0,
+ 197, 77, 0, 12, 13, 426, 119, 119, 0, 0,
+ 119, 0, 575, 576, 0, 0, 0, 0, 0, 173,
+ 0, 0, 0, 275, 276, 0, 283, 0, 0, 402,
+ 406, 588, 402, 402, 0, 0, 0, 0, 0, 0,
+ 418, 419, 0, 0, 0, 197, 0, 0, 78, 0,
+ 425, 89, 0, 197, 197, 0, 0, 124, 0, 0,
+ 0, 0, 83, 79, 10, 0, 0, 105, 149, 82,
+ 0, 0, 105, 0, 0, 112, 232, 0, 150, 247,
+ 0, 0, 151, 0, 0, 0, 197, 145, 146, 147,
+ 148, 0, 197, 196, 361, 406, 0, 188, 0, 231,
+ 0, 0, 0, 197, 0, 236, 368, 78, 0, 0,
+ 250, 77, 0, 12, 13, 225, 124, 0, 0, 197,
+ 0, 83, 79, 10, 0, 0, 292, 0, 82, 0,
+ 0, 0, 0, 0, 0, 232, 311, 0, 646, 647,
+ 173, 0, 521, 402, 0, 0, 0, 0, 0, 56,
+ 0, 0, 196, 526, 0, 0, 0, 0, 231, 0,
+ 0, 197, 0, 0, 119, 119, 0, 0, 0, 197,
+ 77, 0, 12, 13, 0, 0, 197, 197, 0, 0,
+ 0, 0, 134, 149, 357, 152, 0, 135, 139, 140,
+ 105, 0, 138, 150, 137, 136, 133, 151, 141, 142,
+ 143, 144, 145, 146, 147, 148, 0, 56, 0, 0,
+ 0, 569, 0, 0, 0, 0, 247, 56, 247, 0,
+ 68, 0, 0, 0, 413, 78, 0, 0, 0, 78,
+ 474, 0, 0, 0, 124, 0, 0, 481, 124, 83,
+ 79, 10, 0, 83, 79, 10, 82, 0, 0, 0,
+ 82, 197, 0, 65, 275, 276, 0, 232, 0, 0,
+ 0, 0, 0, 0, 0, 0, 60, 61, 0, 64,
+ 58, 0, 0, 59, 196, 0, 67, 0, 197, 421,
+ 488, 0, 0, 0, 403, 0, 62, 63, 77, 0,
+ 12, 13, 77, 0, 12, 13, 0, 68, 89, 0,
+ 0, 0, 78, 0, 0, 0, 0, 0, 0, 0,
+ 0, 124, 0, 347, 0, 462, 83, 79, 10, 357,
+ 0, 0, 349, 82, 105, 0, 197, 345, 343, 341,
+ 65, 105, 0, 0, 344, 112, 0, 485, 247, 0,
+ 0, 348, 0, 60, 61, 0, 64, 58, 0, 0,
+ 59, 0, 0, 67, 0, 0, 0, 78, 346, 0,
+ 0, 589, 0, 62, 63, 77, 124, 12, 13, 0,
+ 350, 83, 79, 10, 0, 0, 342, 0, 82, 13,
+ 0, 56, 56, 0, 0, 232, 0, 0, 0, 119,
+ 0, 119, 0, 0, 0, 0, 0, 0, 0, 535,
+ 0, 119, 196, 247, 0, 0, 0, 0, 231, 0,
+ 0, 0, 546, 550, 0, 0, 0, 0, 0, 0,
+ 77, 357, 12, 13, 462, 0, 0, 0, 462, 0,
+ 0, 0, 0, 563, 357, 0, 0, 0, 0, 0,
+ 0, 0, 307, 0, 68, 0, 0, 41, 0, 78,
+ 47, 42, 0, 247, 44, 0, 40, 50, 124, 43,
+ 45, 53, 56, 83, 79, 10, 0, 0, 46, 52,
+ 82, 51, 32, 30, 0, 0, 0, 65, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 60, 61, 0, 64, 58, 0, 0, 59, 0, 0,
+ 67, 0, 0, 0, 0, 0, 0, 0, 308, 0,
+ 62, 63, 77, 0, 12, 13, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 29, 105, 68, 247, 0, 41, 0, 78,
+ 47, 42, 0, 56, 44, 0, 40, 50, 33, 43,
+ 45, 53, 0, 83, 79, 10, 0, 0, 46, 52,
+ 82, 51, 32, 30, 0, 0, 546, 65, 0, 550,
+ 357, 0, 0, 462, 0, 0, 0, 357, 0, 357,
+ 60, 61, 0, 64, 58, 0, 0, 59, 0, 68,
+ 67, 0, 0, 0, 78, 0, 0, 0, 0, 0,
+ 62, 63, 77, 124, 12, 13, 0, 0, 83, 79,
+ 10, 0, 500, 0, 0, 82, 0, 0, 0, 0,
+ 0, 0, 65, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 60, 61, 0, 64, 58,
+ 0, 0, 59, 0, 68, 67, 0, 0, 0, 78,
+ 0, 0, 0, 0, 0, 62, 63, 77, 124, 12,
+ 13, 0, 0, 83, 79, 10, 0, 499, 0, 0,
+ 82, 0, 0, 0, 0, 0, 0, 65, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 60, 61, 0, 64, 58, 0, 0, 59, 0, 68,
+ 67, 0, 0, 0, 78, 0, 0, 0, 299, 0,
+ 62, 63, 77, 124, 12, 13, 0, 124, 83, 79,
+ 10, 0, 83, 79, 10, 82, 0, 395, 0, 82,
+ 0, 0, 179, 0, 0, 0, 232, 0, 0, 0,
+ 0, 0, 68, 0, 0, 60, 61, 78, 64, 178,
+ 0, 0, 59, 196, 0, 67, 124, 0, 0, 231,
+ 0, 83, 79, 10, 0, 62, 63, 77, 82, 12,
+ 13, 77, 0, 12, 13, 179, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 60, 61,
+ 0, 64, 178, 0, 0, 59, 0, 68, 67, 289,
+ 0, 0, 78, 0, 0, 0, 0, 0, 62, 63,
+ 77, 124, 12, 13, 0, 0, 83, 79, 10, 0,
+ 284, 0, 0, 82, 0, 0, 0, 0, 0, 0,
+ 65, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 68, 0, 0, 60, 61, 78, 64, 58, 187, 0,
+ 59, 0, 0, 67, 124, 0, 0, 0, 0, 83,
+ 79, 10, 0, 62, 63, 77, 82, 12, 13, 0,
+ 0, 0, 0, 65, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 68, 0, 0, 60, 61, 78, 64,
+ 58, 0, 0, 59, 0, 0, 67, 124, 0, 0,
+ 0, 0, 83, 79, 10, 0, 62, 63, 77, 82,
+ 12, 13, 0, 0, 0, 0, 65, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 68, 0, 0, 60,
+ 61, 78, 64, 58, 0, 0, 59, 0, 0, 67,
+ 124, 0, 0, 0, 0, 83, 79, 10, 0, 62,
+ 63, 77, 82, 12, 13, 0, 0, 0, 0, 179,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 60, 61, 0, 64, 178, 0, 0, 59,
+ 0, 0, 67, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 62, 63, 77, 0, 12, 13, 134, 149,
+ 0, 152, 0, 135, 139, 140, 0, 0, 138, 150,
+ 137, 136, 133, 151, 141, 142, 143, 144, 145, 146,
+ 147, 148, 68, 0, 0, 0, 0, 299, 0, 0,
+ 0, 0, 0, 0, 0, 0, 124, 396, 347, 0,
+ 0, 83, 79, 10, 0, 0, 0, 349, 82, 78,
+ 0, 0, 345, 343, 552, 65, 0, 0, 124, 344,
+ 0, 0, 0, 83, 79, 10, 348, 0, 60, 61,
+ 82, 64, 58, 0, 0, 59, 0, 232, 67, 0,
+ 0, 0, 0, 346, 0, 0, 0, 0, 62, 63,
+ 77, 0, 12, 13, 196, 0, 0, 0, 0, 0,
+ 231, 342, 0, 12, 13, 0, 224, 0, 0, 0,
+ 0, 0, 77, 0, 12, 13, 225, 134, 149, 0,
+ 152, 0, 135, 139, 140, 0, 0, 138, 150, 137,
+ 136, 133, 151, 141, 142, 143, 144, 145, 146, 147,
+ 148, 134, 149, 0, 152, 0, 135, 139, 140, 0,
+ 659, 138, 150, 137, 136, 133, 151, 141, 142, 143,
+ 144, 145, 146, 147, 148, 134, 149, 0, 152, 0,
+ 135, 139, 140, 0, 658, 138, 150, 137, 136, 133,
+ 151, 141, 142, 143, 144, 145, 146, 147, 148, 0,
+ 78, 0, 0, 0, 78, 0, 0, 0, 518, 124,
+ 0, 0, 0, 124, 83, 79, 10, 0, 83, 79,
+ 10, 82, 0, 0, 0, 82, 347, 0, 405, 0,
+ 0, 0, 190, 0, 0, 349, 0, 0, 0, 0,
+ 345, 343, 341, 0, 0, 196, 0, 344, 0, 196,
+ 0, 411, 0, 0, 348, 195, 0, 0, 0, 347,
+ 0, 0, 471, 77, 0, 12, 13, 77, 349, 12,
+ 13, 346, 0, 345, 343, 341, 0, 612, 0, 347,
+ 344, 0, 0, 0, 0, 0, 0, 348, 349, 342,
+ 0, 0, 13, 345, 343, 341, 0, 0, 0, 347,
+ 344, 0, 0, 0, 346, 0, 0, 452, 349, 0,
+ 0, 0, 0, 345, 343, 341, 0, 0, 0, 0,
+ 344, 0, 342, 0, 346, 13, 0, 348, 0, 0,
+ 451, 0, 0, 0, 130, 0, 0, 0, 0, 0,
+ 0, 0, 342, 0, 346, 13, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 342, 134, 149, 13, 152, 132, 135, 139,
+ 140, 0, 131, 138, 150, 137, 136, 133, 151, 141,
+ 142, 143, 144, 145, 146, 147, 148, 134, 149, 0,
+ 152, 0, 135, 139, 140, 0, 0, 138, 150, 137,
+ 136, 133, 151, 141, 142, 143, 144, 145, 146, 147,
+ 148, 134, 149, 0, 0, 0, 135, 139, 140, 0,
+ 0, 138, 150, 137, 136, 133, 151, 141, 142, 143,
+ 144, 145, 146, 147, 148, 134, 149, 0, 0, 0,
+ 135, 139, 140, 0, 0, 138, 150, 137, 136, 0,
+ 151, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 0, 0, 0, 135, 139, 140, 0, 0, 138, 150,
+ 137, 136, 0, 151, 141, 142, 143, 144, 145, 146,
+ 147, 148,
+}
+var yyPact = []int{
+
+ -1000, -1000, 542, 536, -1000, 164, -1000, 550, 555, 318,
+ -1000, -1000, -1000, 588, -1000, -1000, 549, 1340, 316, 155,
+ -1000, 214, 640, 308, -1000, 305, -1000, -1000, -1000, -1000,
+ 491, 370, 366, 301, -1000, -1000, -1000, -1000, -1000, 186,
+ -1000, 164, 164, 272, 272, 164, 1689, -1000, 2129, 171,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 25, 1689, 1689,
+ 1689, 1689, 1689, 1689, 1689, 1689, 173, 1732, -1000, -1000,
+ -1000, 489, 200, -1000, -1000, -1000, 253, 1646, 1995, 26,
+ -1000, -1000, 200, 200, -1000, -1000, 96, 536, -1000, 587,
+ 586, 42, 205, -1000, 547, -9, -9, -9, 5, -1000,
+ -1000, -1000, 347, 1850, -1000, -1000, -1000, 292, 849, -1000,
+ 44, 1158, -1000, 172, 908, 488, -1000, -1000, -1000, -1000,
+ -1000, -1000, 25, -1000, 486, -1000, -1000, -1000, -23, 2153,
+ 1689, -1000, -1000, 1689, 1689, 1689, 1689, 1689, 1689, 1689,
+ 1689, 1689, 1689, 1689, 1689, 1689, 1689, 1689, 1689, 1689,
+ 1689, 1689, 1689, 1689, 1689, 1689, 1603, 1689, 522, 1689,
+ 1548, 280, 1689, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 469, 2153, -1000, -1000, -1000, -1000, 1732, 1828,
+ 1689, -1000, -1000, -1000, 1250, -1000, 17, 13, 2153, -1000,
+ 1158, -1000, -1000, -1000, -1000, 1158, 1158, 211, 1158, 39,
+ 27, 300, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 585, 2090, -1000, 1114, 2090, -1000, 172, 485, 164,
+ 297, -1000, -1000, 194, 1689, 164, -1000, -1000, -1000, -1000,
+ -1000, 1158, 573, 296, -1000, 191, 1689, 295, -1000, -1000,
+ -1000, -1000, 1250, 461, -14, -1000, -1000, 908, -1000, -1000,
+ 1158, 908, 1250, 908, 2153, 2201, 2224, 732, 732, 732,
+ 732, 732, 732, 843, 843, 843, 843, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 2177, -23, -23, 2153, -1000, 518,
+ 294, -1000, -1000, 69, 1689, -1000, 293, -1000, -1000, -1000,
+ 51, -1000, -1000, 1505, 1774, 176, 1026, 130, -1000, 1991,
+ 958, 1026, 181, -1000, -1000, -1000, -1000, -1000, -1000, 1158,
+ 1158, -1000, 457, -1000, 164, 11, 288, -1000, -1000, 739,
+ 581, 525, 513, -1000, -1000, 210, 282, -1000, -1000, 479,
+ -1000, 545, 447, 139, -1000, 275, 273, -1000, -1000, -1000,
+ -1000, -1000, 129, 19, 52, 43, 2090, 2070, 572, 476,
+ 78, 192, 264, 262, 164, -3, -1000, 2050, 445, 164,
+ 1689, -23, -1000, 444, 1158, 443, 164, 1689, -23, 439,
+ 164, 132, 1030, 908, -1000, -1000, -1000, -1000, 438, -1000,
+ 437, -1000, -1000, 1689, 1450, 1395, 2153, 520, 1689, 203,
+ 518, 425, -16, 1732, 394, 393, -1000, 1689, 165, -17,
+ -1000, -1000, 1941, -1000, -1000, 1509, -1000, -1000, -1000, -1000,
+ -1000, 1158, 390, -1000, 162, -1000, 1250, 1250, -1000, -1000,
+ -1000, -1000, 1158, 149, 217, 581, 164, -1000, -1000, 388,
+ 545, 210, 581, 545, 164, 137, 274, -1000, 908, 386,
+ -1000, -1000, -1000, -1000, 2090, 10, 2090, 164, 1839, -1000,
+ -1000, 298, 2090, -1000, -1000, 2090, 164, 256, -1000, 133,
+ -1000, 582, -1000, 78, -1000, -1000, 381, -21, 164, 164,
+ 581, 2090, -1000, -1000, -23, -1000, -1000, 255, -1000, -1000,
+ 849, -23, -1000, -1000, -1000, 472, -1000, -1000, 908, -1000,
+ -1000, -1000, -1000, -1000, -1000, 1030, 1030, 1250, 251, 1689,
+ 1689, -1000, -1000, -1000, -1000, -1000, 1732, 166, -1000, -1000,
+ 376, -1000, -1000, -1000, 4, -1000, 1026, -1000, 1103, 1026,
+ 1026, 372, -1000, -1000, -1000, 125, -1000, -1000, -1000, -1000,
+ -1000, 581, 363, -1000, 359, -1000, -1000, -1000, 352, -1000,
+ -1000, 2090, 3, 121, 249, -1000, 2090, 117, 230, -1000,
+ 283, -1000, -1000, -1000, 350, -1000, -1000, 344, -1000, 266,
+ -1000, 221, 2017, 220, -1000, -1000, 581, 343, 164, 191,
+ 908, 338, -1000, 118, 1689, 2153, 2153, 136, 1250, 89,
+ -1000, -1000, -1000, -1000, 1689, -1000, -1000, -1000, 2153, -1000,
+ 82, 41, -1000, -1000, -1000, 581, 581, 1030, -1000, 2090,
+ -1000, 164, 581, -1000, 1839, 164, -1000, 2017, 133, -1000,
+ -1000, -1000, 164, -1000, 164, -1000, -1000, -1000, 336, -1000,
+ -1000, -1000, -1000, 219, -1000, 1689, 1689, 1732, 565, 1,
+ 1026, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ 335, -1000, 328, 303, 276, 1030, 1917, 1893, -1000, -1000,
+ 110, -1000, 37, 2017, -1000, -1000, 2017, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 1689, 518, -1000,
+}
+var yyPgo = []int{
+
+ 0, 57, 771, 774, 45, 150, 26, 540, 29, 770,
+ 768, 2, 28, 61, 322, 766, 17, 4, 765, 761,
+ 760, 759, 758, 756, 3, 755, 622, 47, 14, 754,
+ 490, 40, 41, 130, 37, 12, 752, 561, 43, 620,
+ 751, 564, 750, 749, 25, 745, 162, 743, 31, 11,
+ 740, 48, 5, 1, 18, 735, 679, 734, 7, 22,
+ 733, 732, 19, 730, 729, 728, 16, 54, 725, 723,
+ 33, 721, 23, 719, 588, 46, 9, 718, 715, 714,
+ 713, 39, 712, 711, 710, 15, 56, 709, 13, 706,
+ 0, 70, 49, 24, 20, 21, 10, 8, 704, 6,
+ 42, 30, 703, 701, 700, 699, 88, 34, 698, 32,
+ 27, 697, 696, 695, 694, 692, 685, 51, 44, 680,
+ 36, 677, 35, 676, 671, 667, 666, 657, 656, 655,
+ 648, 647, 642, 639, 637, 634, 631, 627, 38, 623,
+ 596, 593,
+}
+var yyR1 = []int{
+
+ 0, 112, 114, 114, 116, 113, 115, 115, 119, 119,
+ 119, 120, 120, 121, 121, 2, 2, 2, 117, 123,
+ 123, 124, 118, 50, 50, 50, 50, 50, 74, 74,
+ 74, 74, 74, 74, 74, 74, 74, 74, 126, 70,
+ 70, 70, 75, 75, 76, 76, 76, 36, 48, 44,
+ 44, 44, 44, 44, 44, 9, 9, 9, 9, 127,
+ 11, 128, 10, 62, 62, 129, 53, 42, 42, 42,
+ 22, 22, 22, 21, 130, 23, 24, 24, 131, 132,
+ 133, 25, 134, 63, 64, 64, 65, 65, 135, 136,
+ 45, 137, 43, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 46, 46, 46, 46, 46, 46,
+ 46, 46, 46, 41, 41, 41, 40, 40, 40, 40,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 49, 28, 16, 16, 15, 15, 39, 39, 17, 17,
+ 31, 1, 1, 33, 34, 37, 37, 3, 3, 3,
+ 91, 91, 30, 29, 81, 81, 7, 7, 7, 7,
+ 7, 7, 32, 32, 32, 32, 87, 87, 87, 87,
+ 87, 79, 79, 80, 89, 89, 89, 89, 89, 12,
+ 12, 88, 88, 88, 88, 88, 88, 88, 85, 86,
+ 84, 84, 83, 83, 47, 18, 18, 19, 19, 90,
+ 51, 51, 52, 52, 52, 139, 20, 20, 60, 60,
+ 71, 71, 77, 77, 78, 78, 73, 73, 69, 69,
+ 72, 72, 72, 72, 72, 72, 4, 4, 13, 27,
+ 27, 27, 82, 8, 8, 8, 8, 68, 68, 67,
+ 67, 6, 6, 6, 6, 6, 26, 26, 26, 26,
+ 26, 140, 26, 26, 26, 26, 26, 26, 26, 26,
+ 66, 66, 55, 55, 54, 54, 56, 56, 59, 59,
+ 57, 57, 57, 57, 58, 58, 122, 122, 138, 138,
+ 35, 35, 61, 61, 38, 38, 101, 101, 105, 105,
+ 103, 103, 5, 5, 141, 141, 141, 141, 141, 141,
+ 92, 108, 106, 106, 106, 111, 111, 107, 107, 107,
+ 107, 107, 107, 107, 107, 107, 107, 107, 110, 109,
+ 95, 95, 97, 96, 96, 99, 99, 98, 98, 94,
+ 94, 94, 93, 93, 125, 125, 100, 100, 104, 104,
+ 102, 102,
+}
+var yyR2 = []int{
+
+ 0, 4, 0, 3, 0, 3, 0, 3, 2, 5,
+ 3, 3, 2, 1, 3, 1, 2, 2, 4, 0,
+ 1, 0, 4, 0, 1, 1, 1, 1, 2, 5,
+ 3, 2, 5, 7, 3, 2, 5, 3, 1, 2,
+ 4, 3, 4, 3, 1, 2, 1, 1, 2, 1,
+ 3, 3, 3, 2, 2, 3, 5, 5, 2, 0,
+ 4, 0, 3, 0, 2, 0, 4, 4, 4, 2,
+ 5, 1, 1, 2, 0, 3, 1, 3, 0, 0,
+ 0, 8, 0, 5, 0, 2, 0, 2, 0, 0,
+ 7, 0, 5, 1, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 3, 5, 6, 1, 1, 3, 5,
+ 5, 4, 6, 8, 1, 5, 5, 5, 7, 1,
+ 0, 3, 1, 4, 1, 4, 1, 3, 1, 1,
+ 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,
+ 4, 4, 1, 1, 1, 2, 1, 1, 1, 1,
+ 1, 3, 1, 1, 1, 2, 1, 1, 1, 1,
+ 3, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 3, 4, 4, 2, 3, 5, 1, 1, 2, 3,
+ 5, 3, 5, 3, 3, 5, 8, 5, 8, 5,
+ 0, 3, 0, 1, 3, 1, 4, 2, 0, 3,
+ 1, 3, 1, 3, 1, 3, 1, 3, 1, 3,
+ 3, 2, 4, 3, 5, 5, 1, 3, 1, 2,
+ 1, 3, 4, 1, 2, 2, 1, 1, 3, 0,
+ 2, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 0, 4, 1, 2, 2, 2, 2, 2, 2,
+ 1, 3, 1, 3, 1, 3, 1, 3, 1, 3,
+ 1, 1, 3, 3, 0, 2, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 4, 4, 5, 6, 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
+ 4, 5, 4, 4, 2, 2, 4, 3, 3, 5,
+ 3, 4, 3, 5, 1, 0, 1, 3, 1, 1,
+ 2, 1, 1, 5, 0, 2, 1, 3, 1, 3,
+ 1, 3,
+}
+var yyChk = []int{
+
+ -1000, -112, -113, -116, -114, 26, -117, 26, -115, -3,
+ 25, -91, 74, 75, -118, -124, 25, -60, -119, 22,
+ 63, 4, -125, -123, 25, -50, -74, -47, -26, 2,
+ 33, -126, 32, 18, -44, -23, -45, -43, -25, -29,
+ 16, 7, 11, 19, 14, 20, 28, 10, -14, -56,
+ 17, 31, 29, 21, -33, -46, -3, -39, 54, 57,
+ 50, 51, 70, 71, 53, 37, -40, 60, 4, -30,
+ -41, -79, -80, -20, -90, -88, -139, 72, 9, 24,
+ -84, -83, 30, 23, 63, -120, 60, -2, 4, -3,
+ 64, 64, 65, -141, 22, 33, 10, 32, 18, 63,
+ 63, -70, 60, -54, -34, -3, -75, 60, -54, -48,
+ 60, -36, -3, -18, 60, -3, 67, -37, -33, -3,
+ -37, -41, -39, -3, 18, -41, -33, -61, -56, -14,
+ 5, 43, 38, 48, 34, 39, 47, 46, 44, 40,
+ 41, 50, 51, 52, 53, 54, 55, 56, 57, 35,
+ 45, 49, 37, 66, 6, 76, -130, -135, -137, -131,
+ 60, 64, 72, -46, -46, -46, -46, -46, -46, -46,
+ -46, 68, -17, -14, -32, -86, -90, -88, 54, 37,
+ 60, -1, 36, 68, -1, 2, -35, 12, -14, -87,
+ 37, -90, -88, -85, -12, 60, 54, -30, 72, -1,
+ -1, -121, 61, -120, -117, -118, 4, 4, 25, 74,
+ 65, 25, -92, -91, -92, -108, -92, -19, -92, 60,
+ -71, 61, -70, -7, 66, 76, -86, -90, -88, -85,
+ -12, 60, 37, -75, 61, -7, 66, -78, 61, -48,
+ -7, -51, 68, -67, -68, -8, -31, -3, -81, -7,
+ 12, 60, -140, 60, -14, -14, -14, -14, -14, -14,
+ -14, -14, -14, -14, -14, -14, -14, -14, -14, -14,
+ -14, -14, -14, -14, -14, -56, -56, -14, -21, -22,
+ -38, -42, -44, -56, 27, -24, -38, 36, -24, 61,
+ -59, -17, -3, 60, -14, -35, -49, 61, -32, 9,
+ -14, -49, -66, -6, -11, -74, -26, 2, 68, 73,
+ 73, -7, -7, -7, 64, -7, -73, 69, -72, -55,
+ -13, 60, 54, -33, -4, 25, -69, 69, -27, -33,
+ -4, 60, -122, 63, -118, 4, -106, -107, -110, -109,
+ -91, 25, 72, 24, 30, 23, 54, 9, 37, 18,
+ 66, -106, -106, -51, 60, -100, -95, -3, -122, 63,
+ 66, -56, -34, -7, 9, -122, 63, 66, -56, -122,
+ 63, -66, 61, 76, -138, -31, -81, -7, -67, -6,
+ -67, -53, 36, 63, 66, 6, -14, -136, 63, -62,
+ -132, -138, 12, 76, -17, 32, 73, 67, -58, -57,
+ -28, -16, -14, 68, 68, 37, -7, -90, -88, -85,
+ -12, 60, -138, 76, -58, 69, 63, -127, -7, -7,
+ 61, -3, 73, -122, 63, -7, 76, -5, 4, -13,
+ 54, 25, -13, 60, 64, -122, 63, -82, 60, -4,
+ 61, -120, 63, 63, 73, 4, 72, 68, 68, -106,
+ -111, 60, 37, -107, -109, 9, 60, -93, -94, 60,
+ 4, 51, -3, 66, 63, 63, -101, -100, 61, 76,
+ -106, 12, 61, -70, -56, 61, 61, -77, -76, -75,
+ -54, -56, 61, -48, 69, -3, -52, -89, 60, -86,
+ -90, -88, -85, -12, -8, 61, 61, -129, -38, 27,
+ 27, 36, -38, -10, 69, -9, 8, 13, -53, 61,
+ -138, -17, 61, 61, -35, 69, 76, -138, 67, -49,
+ -49, -7, 61, 69, -6, -66, -7, 69, -72, -5,
+ -33, 61, -13, -5, -13, -3, 69, -27, -67, 61,
+ -106, 73, -106, -105, -104, -97, -3, -103, -102, -96,
+ -3, -106, 25, -91, -110, -106, -106, -101, 63, -94,
+ 4, -93, 61, -3, -95, -5, -106, -122, 63, -7,
+ 60, -67, -52, -66, 63, -14, -14, -62, -128, -59,
+ 67, -133, 61, 73, 67, -28, -16, -15, -14, 68,
+ -58, -58, 61, 69, -5, 61, 61, 61, -106, 73,
+ 69, 63, -106, 69, 63, 60, 61, 61, 50, 63,
+ -99, -98, 60, -106, 60, -5, 61, -76, -67, 61,
+ 69, -38, 69, -66, 67, 66, 6, 76, -64, -35,
+ -49, 69, 69, -5, -5, -52, -106, -97, -5, -96,
+ -101, -99, -94, -101, -101, 61, -14, -14, -65, -63,
+ 15, 73, -58, 61, 61, 61, 61, -52, 67, 67,
+ 21, -11, 69, -99, -99, -134, -24, -53,
+}
+var yyDef = []int{
+
+ 4, -2, 2, 0, 6, 0, 21, 0, 218, 0,
+ 157, 158, 159, 0, 5, 344, 19, -2, 0, 0,
+ 3, 0, 0, 0, 20, 0, 24, 25, 26, 27,
+ 0, 0, 0, 0, 256, 257, 258, 259, 260, 0,
+ 263, 155, 155, 0, 0, 0, 292, 38, -2, 0,
+ 74, 88, 91, 78, 163, 93, -2, 114, 0, 0,
+ 0, 0, 0, 0, 0, 0, 146, 0, 126, 127,
+ 134, 0, 0, 139, -2, -2, 0, 290, 0, 0,
+ 196, 197, 0, 0, 7, 8, 0, 21, 15, 0,
+ 0, 0, 0, 345, 0, 0, 0, 0, 0, 18,
+ 219, 28, 0, 0, 274, 154, 31, 0, 0, 35,
+ 0, 0, 47, 210, 249, 0, 261, 264, 156, 153,
+ 265, -2, 0, 162, 0, -2, 268, 269, 293, 276,
+ 0, 53, 54, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 294, 294, 0, 294,
+ 0, 0, 290, 115, 116, 117, 118, 119, 120, 121,
+ 122, 140, 0, 148, 149, 172, -2, -2, 0, 0,
+ 0, 140, 151, 152, -2, 217, 0, 0, 291, 193,
+ 0, 176, 177, 178, 179, 0, 0, 189, 0, 0,
+ 0, 286, 10, 13, 21, 12, 16, 17, 160, 161,
+ 22, 0, 0, 310, 0, 0, 311, 210, 0, 0,
+ 286, 30, 220, 39, 0, 0, 166, 167, 168, 169,
+ 170, 0, 0, 286, 34, 0, 0, 286, 37, 224,
+ 48, 204, -2, 0, 288, 247, 243, 162, 246, 150,
+ 164, 249, -2, 249, 50, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 51, 52, 277, 75, 0,
+ 71, 72, 295, 0, 0, 89, 76, 63, 79, 123,
+ 288, 278, 128, 0, 291, 0, 284, 147, 175, 0,
+ 288, 284, 0, 270, 252, 253, 254, 255, 59, 0,
+ 0, 194, 0, 198, 0, 0, 286, 201, 226, 0,
+ 302, 0, 0, 272, 238, -2, 286, 203, 228, 0,
+ 240, 0, 0, 287, 11, 0, 0, 312, 313, 314,
+ 317, 318, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 296, 0, 346, 0, 0, 287,
+ 0, 41, 275, 0, 0, 0, 287, 0, 43, 0,
+ 287, 0, 212, 289, 250, 244, 245, 165, 0, 262,
+ 0, 73, 65, 294, 0, 0, 69, 0, 294, 0,
+ 0, 0, 288, 289, 0, 0, 131, 290, 0, 288,
+ 280, 281, 142, 140, 140, 0, 199, -2, -2, -2,
+ -2, 0, 0, 289, 0, 216, -2, -2, 191, 192,
+ 180, 190, 0, 0, 287, 302, 0, 231, 303, 0,
+ 0, 236, 302, 0, 0, 0, 287, 239, 249, 0,
+ 9, 14, 304, 305, 0, 0, 0, 298, 300, 324,
+ 325, 0, 0, 315, 316, 0, 296, 0, 342, 0,
+ 339, 0, 341, 0, 308, 309, 0, 297, 0, 0,
+ 302, 0, 29, 221, 40, 171, 32, 286, 222, 44,
+ 46, 42, 36, 225, 211, 162, 209, 213, 249, 184,
+ 185, 186, 187, 188, 248, 212, 212, -2, 0, 0,
+ 0, 63, 77, 64, 92, 61, 0, 0, 80, 124,
+ 0, 279, 129, 130, 0, 137, 289, 285, 0, 284,
+ 284, 0, 135, 136, 271, 0, 195, 200, 227, 230,
+ 273, 302, 0, 233, 0, 237, 202, 229, 0, 241,
+ 319, 0, 0, 0, 299, 348, 0, 0, 301, 350,
+ 0, 334, -2, -2, 0, 327, 328, 0, 306, 0,
+ 340, 0, 335, 0, 347, 330, 302, 0, 287, 45,
+ 249, 0, 205, 0, 294, 67, 68, 0, -2, 0,
+ 58, 84, 125, 132, 290, 282, 283, 141, 144, 140,
+ 0, 0, -2, 60, 232, 302, 302, 212, 320, 0,
+ 322, 0, 302, 323, 0, 296, 326, 335, 0, 307,
+ 207, 336, 296, 338, 296, 331, 33, 223, 0, 214,
+ 66, 70, 90, 62, 55, 0, 0, 0, 86, 0,
+ 284, 143, 138, 234, 235, 242, 321, 349, 332, 351,
+ 0, 329, 0, 0, 0, 212, 0, 0, 81, 85,
+ 0, 133, 0, 335, 343, 337, 335, 206, 56, 57,
+ 82, 87, 145, 333, 208, 294, 0, 83,
+}
+var yyTok1 = []int{
+
+ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 70, 3, 3, 65, 56, 57, 3,
+ 60, 61, 54, 50, 76, 51, 64, 55, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 67, 63,
+ 3, 66, 3, 74, 75, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 72, 3, 73, 53, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 68, 52, 69, 71,
+}
+var yyTok2 = []int{
+
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 58, 59,
+ 62,
+}
+var yyTok3 = []int{
+ 0,
+}
+
+//line yaccpar:1
+
+/* parser for yacc output */
+
+var yyDebug = 0
+
+type yyLexer interface {
+ Lex(lval *yySymType) int
+ Error(s string)
+}
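
Any value satisfying this interface can drive the parser; the compiler's real implementation is the lexer in lex.go. A minimal illustrative stand-in that replays a canned token stream (cannedLex is invented; it uses the file's __yyfmt__ alias for fmt):

	type cannedLex struct {
		toks []int
		pos  int
	}

	func (l *cannedLex) Lex(lval *yySymType) int {
		if l.pos >= len(l.toks) {
			return 0 // <= 0 means end of input
		}
		l.pos++
		return l.toks[l.pos-1]
	}

	func (l *cannedLex) Error(s string) {
		__yyfmt__.Println("parse error:", s)
	}
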
+
+type yyParser interface {
+ Lookahead() int
+}
+
+type yyParserImpl struct {
+ lookahead func() int
+}
+
+func (p *yyParserImpl) Lookahead() int {
+ return p.lookahead()
+}
+
+const yyFlag = -1000
+
+func yyTokname(c int) string {
+ // 4 is TOKSTART above
+ if c >= 4 && c-4 < len(yyToknames) {
+ if yyToknames[c-4] != "" {
+ return yyToknames[c-4]
+ }
+ }
+ return __yyfmt__.Sprintf("tok-%v", c)
+}
+
+func yyStatname(s int) string {
+ if s >= 0 && s < len(yyStatenames) {
+ if yyStatenames[s] != "" {
+ return yyStatenames[s]
+ }
+ }
+ return __yyfmt__.Sprintf("state-%v", s)
+}
+
+func yylex1(lex yyLexer, lval *yySymType) (char, token int) {
+ token = 0
+ char = lex.Lex(lval)
+ if char <= 0 {
+ token = yyTok1[0]
+ goto out
+ }
+ if char < len(yyTok1) {
+ token = yyTok1[char]
+ goto out
+ }
+ if char >= yyPrivate {
+ if char < yyPrivate+len(yyTok2) {
+ token = yyTok2[char-yyPrivate]
+ goto out
+ }
+ }
+ for i := 0; i < len(yyTok3); i += 2 {
+ token = yyTok3[i+0]
+ if token == char {
+ token = yyTok3[i+1]
+ goto out
+ }
+ }
+
+out:
+ if token == 0 {
+ token = yyTok2[1] /* unknown char */
+ }
+ if yyDebug >= 3 {
+ __yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char))
+ }
+ return char, token
+}
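
As a worked example of this translation: the lexer returns '+' as its byte value, 43; yyTok1[43] is 50, so the parser works with token 50, and yyTokname(50) reads yyToknames[50-4], which is "'+'" in the list above.
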
+
+func yyParse(yylex yyLexer) int {
+ var yyn int
+ var yylval yySymType
+ var yyVAL yySymType
+ var yyDollar []yySymType
+ yyS := make([]yySymType, yyMaxDepth)
+
+ Nerrs := 0 /* number of errors */
+ Errflag := 0 /* error recovery flag */
+ yystate := 0
+ yychar := -1
+ yytoken := -1 // yychar translated into internal numbering
+ if lx, ok := yylex.(interface {
+ SetParser(yyParser)
+ }); ok {
+ p := &yyParserImpl{
+ lookahead: func() int { return yychar },
+ }
+ lx.SetParser(p)
+ defer func() {
+ // Make sure we report no lookahead when not parsing.
+ yychar = -1
+ yytoken = -1
+ }()
+ }
+ yyp := -1
+ goto yystack
+
+ret0:
+ return 0
+
+ret1:
+ return 1
+
+yystack:
+ /* put a state and value onto the stack */
+ if yyDebug >= 4 {
+ __yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate))
+ }
+
+ yyp++
+ if yyp >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyS[yyp] = yyVAL
+ yyS[yyp].yys = yystate
+
+yynewstate:
+ yyn = yyPact[yystate]
+ if yyn <= yyFlag {
+ goto yydefault /* simple state */
+ }
+ if yychar < 0 {
+ yychar, yytoken = yylex1(yylex, &yylval)
+ }
+ yyn += yytoken
+ if yyn < 0 || yyn >= yyLast {
+ goto yydefault
+ }
+ yyn = yyAct[yyn]
+ if yyChk[yyn] == yytoken { /* valid shift */
+ yychar = -1
+ yytoken = -1
+ yyVAL = yylval
+ yystate = yyn
+ if Errflag > 0 {
+ Errflag--
+ }
+ goto yystack
+ }
+
+yydefault:
+ /* default state action */
+ yyn = yyDef[yystate]
+ if yyn == -2 {
+ if yychar < 0 {
+ yychar, yytoken = yylex1(yylex, &yylval)
+ }
+
+ /* look through exception table */
+ xi := 0
+ for {
+ if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
+ break
+ }
+ xi += 2
+ }
+ for xi += 2; ; xi += 2 {
+ yyn = yyExca[xi+0]
+ if yyn < 0 || yyn == yytoken {
+ break
+ }
+ }
+ yyn = yyExca[xi+1]
+ if yyn < 0 {
+ goto ret0
+ }
+ }
+ if yyn == 0 {
+ /* error ... attempt to resume parsing */
+ switch Errflag {
+ case 0: /* brand new error */
+ yylex.Error("syntax error")
+ Nerrs++
+ if yyDebug >= 1 {
+ __yyfmt__.Printf("%s", yyStatname(yystate))
+ __yyfmt__.Printf(" saw %s\n", yyTokname(yytoken))
+ }
+ fallthrough
+
+ case 1, 2: /* incompletely recovered error ... try again */
+ Errflag = 3
+
+ /* find a state where "error" is a legal shift action */
+ for yyp >= 0 {
+ yyn = yyPact[yyS[yyp].yys] + yyErrCode
+ if yyn >= 0 && yyn < yyLast {
+ yystate = yyAct[yyn] /* simulate a shift of "error" */
+ if yyChk[yystate] == yyErrCode {
+ goto yystack
+ }
+ }
+
+ /* the current p has no shift on "error", pop stack */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
+ }
+ yyp--
+ }
+ /* there is no state on the stack with an error shift ... abort */
+ goto ret1
+
+ case 3: /* no shift yet; clobber input char */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken))
+ }
+ if yytoken == yyEofCode {
+ goto ret1
+ }
+ yychar = -1
+ yytoken = -1
+ goto yynewstate /* try again in the same state */
+ }
+ }
+
+ /* reduction by production yyn */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
+ }
+
+ yynt := yyn
+ yypt := yyp
+ _ = yypt // guard against "declared and not used"
+
+ yyp -= yyR2[yyn]
+ // yyp is now the index of $0. Perform the default action. Iff the
+ // reduced production is ε, $1 is possibly out of range.
+ if yyp+1 >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyVAL = yyS[yyp+1]
+
+ /* consult goto table to find next state */
+ yyn = yyR1[yyn]
+ yyg := yyPgo[yyn]
+ yyj := yyg + yyS[yyp].yys + 1
+
+ if yyj >= yyLast {
+ yystate = yyAct[yyg]
+ } else {
+ yystate = yyAct[yyj]
+ if yyChk[yystate] != -yyn {
+ yystate = yyAct[yyg]
+ }
+ }
+ // dummy call; replaced with literal code
+ switch yynt {
+
+ case 1:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:127
+ {
+ xtop = concat(xtop, yyDollar[4].list)
+ }
+ case 2:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:133
+ {
+ prevlineno = lineno
+ Yyerror("package statement must be first")
+ errorexit()
+ }
+ case 3:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:139
+ {
+ mkpackage(yyDollar[2].sym.Name)
+ }
+ case 4:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:149
+ {
+ importpkg = Runtimepkg
+
+ if Debug['A'] != 0 {
+ cannedimports("runtime.Builtin", "package runtime\n\n$$\n\n")
+ } else {
+ cannedimports("runtime.Builtin", runtimeimport)
+ }
+ curio.importsafe = true
+ }
+ case 5:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:161
+ {
+ importpkg = nil
+ }
+ case 11:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:175
+ {
+ ipkg := importpkg
+ my := importmyname
+ importpkg = nil
+ importmyname = nil
+
+ if my == nil {
+ my = Lookup(ipkg.Name)
+ }
+
+ pack := Nod(OPACK, nil, nil)
+ pack.Sym = my
+ pack.Pkg = ipkg
+ pack.Lineno = int32(yyDollar[1].i)
+
+ if strings.HasPrefix(my.Name, ".") {
+ importdot(ipkg, pack)
+ break
+ }
+ if my.Name == "init" {
+ Yyerror("cannot import package as init - init must be a func")
+ break
+ }
+ if my.Name == "_" {
+ break
+ }
+ if my.Def != nil {
+ lineno = int32(yyDollar[1].i)
+ redeclare(my, "as imported package name")
+ }
+ my.Def = pack
+ my.Lastlineno = int32(yyDollar[1].i)
+ my.Block = 1 // at top level
+ }
+ case 12:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:210
+ {
+ // When an invalid import path is passed to importfile,
+ // it calls Yyerror and then sets up a fake import with
+ // no package statement. This allows us to test more
+ // than one invalid import statement in a single file.
+ if nerrors == 0 {
+ Fatal("phase error in import")
+ }
+ }
+ case 15:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:226
+ {
+ // import with original name
+ yyVAL.i = parserline()
+ importmyname = nil
+ importfile(&yyDollar[1].val, yyVAL.i)
+ }
+ case 16:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:233
+ {
+ // import with given name
+ yyVAL.i = parserline()
+ importmyname = yyDollar[1].sym
+ importfile(&yyDollar[2].val, yyVAL.i)
+ }
+ case 17:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:240
+ {
+ // import into my name space
+ yyVAL.i = parserline()
+ importmyname = Lookup(".")
+ importfile(&yyDollar[2].val, yyVAL.i)
+ }
+ case 18:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:249
+ {
+ if importpkg.Name == "" {
+ importpkg.Name = yyDollar[2].sym.Name
+ Pkglookup(yyDollar[2].sym.Name, nil).Npkg++
+ } else if importpkg.Name != yyDollar[2].sym.Name {
+ Yyerror("conflicting names %s and %s for package \"%v\"", importpkg.Name, yyDollar[2].sym.Name, Zconv(importpkg.Path, 0))
+ }
+ importpkg.Direct = 1
+ importpkg.Safe = curio.importsafe
+
+ if safemode != 0 && !curio.importsafe {
+ Yyerror("cannot import unsafe package \"%v\"", Zconv(importpkg.Path, 0))
+ }
+ }
+ case 20:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:266
+ {
+ if yyDollar[1].sym.Name == "safe" {
+ curio.importsafe = true
+ }
+ }
+ case 21:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:273
+ {
+ defercheckwidth()
+ }
+ case 22:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:277
+ {
+ resumecheckwidth()
+ unimportfile()
+ }
+ case 23:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:286
+ {
+ Yyerror("empty top-level declaration")
+ yyVAL.list = nil
+ }
+ case 24:
+ yyVAL.list = yyS[yypt-0].list
+ case 25:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:292
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 26:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:296
+ {
+ Yyerror("non-declaration statement outside function body")
+ yyVAL.list = nil
+ }
+ case 27:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:301
+ {
+ yyVAL.list = nil
+ }
+ case 28:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:307
+ {
+ yyVAL.list = yyDollar[2].list
+ }
+ case 29:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:311
+ {
+ yyVAL.list = yyDollar[3].list
+ }
+ case 30:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:315
+ {
+ yyVAL.list = nil
+ }
+ case 31:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:319
+ {
+ yyVAL.list = yyDollar[2].list
+ iota_ = -100000
+ lastconst = nil
+ }
+ case 32:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:325
+ {
+ yyVAL.list = yyDollar[3].list
+ iota_ = -100000
+ lastconst = nil
+ }
+ case 33:
+ yyDollar = yyS[yypt-7 : yypt+1]
+ //line go.y:331
+ {
+ yyVAL.list = concat(yyDollar[3].list, yyDollar[5].list)
+ iota_ = -100000
+ lastconst = nil
+ }
+ case 34:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:337
+ {
+ yyVAL.list = nil
+ iota_ = -100000
+ }
+ case 35:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:342
+ {
+ yyVAL.list = list1(yyDollar[2].node)
+ }
+ case 36:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:346
+ {
+ yyVAL.list = yyDollar[3].list
+ }
+ case 37:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:350
+ {
+ yyVAL.list = nil
+ }
+ case 38:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:356
+ {
+ iota_ = 0
+ }
+ case 39:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:362
+ {
+ yyVAL.list = variter(yyDollar[1].list, yyDollar[2].node, nil)
+ }
+ case 40:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:366
+ {
+ yyVAL.list = variter(yyDollar[1].list, yyDollar[2].node, yyDollar[4].list)
+ }
+ case 41:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:370
+ {
+ yyVAL.list = variter(yyDollar[1].list, nil, yyDollar[3].list)
+ }
+ case 42:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:376
+ {
+ yyVAL.list = constiter(yyDollar[1].list, yyDollar[2].node, yyDollar[4].list)
+ }
+ case 43:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:380
+ {
+ yyVAL.list = constiter(yyDollar[1].list, nil, yyDollar[3].list)
+ }
+ case 44:
+ yyVAL.list = yyS[yypt-0].list
+ case 45:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:387
+ {
+ yyVAL.list = constiter(yyDollar[1].list, yyDollar[2].node, nil)
+ }
+ case 46:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:391
+ {
+ yyVAL.list = constiter(yyDollar[1].list, nil, nil)
+ }
+ case 47:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:397
+ {
+ // different from dclname because the name
+ // becomes visible right here, not at the end
+ // of the declaration.
+ yyVAL.node = typedcl0(yyDollar[1].sym)
+ }
+ case 48:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:406
+ {
+ yyVAL.node = typedcl1(yyDollar[1].node, yyDollar[2].node, 1)
+ }
+ case 49:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:412
+ {
+ yyVAL.node = yyDollar[1].node
+
+ // These nodes do not carry line numbers.
+ // Since a bare name used as an expression is an error,
+ // introduce a wrapper node to give the correct line.
+ switch yyVAL.node.Op {
+ case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+ yyVAL.node = Nod(OPAREN, yyVAL.node, nil)
+ yyVAL.node.Implicit = 1
+ break
+ }
+ }
+ case 50:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:426
+ {
+ yyVAL.node = Nod(OASOP, yyDollar[1].node, yyDollar[3].node)
+ yyVAL.node.Etype = uint8(yyDollar[2].i) // rathole to pass opcode
+ }
+ case 51:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:431
+ {
+ if yyDollar[1].list.Next == nil && yyDollar[3].list.Next == nil {
+ // simple
+ yyVAL.node = Nod(OAS, yyDollar[1].list.N, yyDollar[3].list.N)
+ break
+ }
+ // multiple
+ yyVAL.node = Nod(OAS2, nil, nil)
+ yyVAL.node.List = yyDollar[1].list
+ yyVAL.node.Rlist = yyDollar[3].list
+ }
+ case 52:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:443
+ {
+ if yyDollar[3].list.N.Op == OTYPESW {
+ yyVAL.node = Nod(OTYPESW, nil, yyDollar[3].list.N.Right)
+ if yyDollar[3].list.Next != nil {
+ Yyerror("expr.(type) must be alone in list")
+ }
+ if yyDollar[1].list.Next != nil {
+ Yyerror("argument count mismatch: %d = %d", count(yyDollar[1].list), 1)
+ } else if (yyDollar[1].list.N.Op != ONAME && yyDollar[1].list.N.Op != OTYPE && yyDollar[1].list.N.Op != ONONAME) || isblank(yyDollar[1].list.N) {
+				Yyerror("invalid variable name %v in type switch", yyDollar[1].list.N)
+ } else {
+ yyVAL.node.Left = dclname(yyDollar[1].list.N.Sym)
+ } // it's a colas, so must not re-use an oldname.
+ break
+ }
+ yyVAL.node = colas(yyDollar[1].list, yyDollar[3].list, int32(yyDollar[2].i))
+ }
+ case 53:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:461
+ {
+ yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
+ yyVAL.node.Implicit = 1
+ yyVAL.node.Etype = OADD
+ }
+ case 54:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:467
+ {
+ yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
+ yyVAL.node.Implicit = 1
+ yyVAL.node.Etype = OSUB
+ }
+ case 55:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:475
+ {
+ var n, nn *Node
+
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl()
+ yyVAL.node = Nod(OXCASE, nil, nil)
+ yyVAL.node.List = yyDollar[2].list
+ if typesw != nil && typesw.Right != nil {
+ n = typesw.Right.Left
+ if n != nil {
+ // type switch - declare variable
+ nn = newname(n.Sym)
+ declare(nn, dclcontext)
+ yyVAL.node.Nname = nn
+
+ // keep track of the instances for reporting unused
+ nn.Defn = typesw.Right
+ }
+ }
+ }
+ case 56:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:498
+ {
+ var n *Node
+
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl()
+ yyVAL.node = Nod(OXCASE, nil, nil)
+ if yyDollar[2].list.Next == nil {
+ n = Nod(OAS, yyDollar[2].list.N, yyDollar[4].node)
+ } else {
+ n = Nod(OAS2, nil, nil)
+ n.List = yyDollar[2].list
+ n.Rlist = list1(yyDollar[4].node)
+ }
+ yyVAL.node.List = list1(n)
+ }
+ case 57:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:516
+ {
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl()
+ yyVAL.node = Nod(OXCASE, nil, nil)
+ yyVAL.node.List = list1(colas(yyDollar[2].list, list1(yyDollar[4].node), int32(yyDollar[3].i)))
+ }
+ case 58:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:525
+ {
+ var n, nn *Node
+
+ markdcl()
+ yyVAL.node = Nod(OXCASE, nil, nil)
+ if typesw != nil && typesw.Right != nil {
+ n = typesw.Right.Left
+ if n != nil {
+ // type switch - declare variable
+ nn = newname(n.Sym)
+ declare(nn, dclcontext)
+ yyVAL.node.Nname = nn
+
+ // keep track of the instances for reporting unused
+ nn.Defn = typesw.Right
+ }
+ }
+ }
+ case 59:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:546
+ {
+ markdcl()
+ }
+ case 60:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:550
+ {
+ if yyDollar[3].list == nil {
+ yyVAL.node = Nod(OEMPTY, nil, nil)
+ } else {
+ yyVAL.node = liststmt(yyDollar[3].list)
+ }
+ popdcl()
+ }
+ case 61:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:561
+ {
+ // If the last token read by the lexer was consumed
+ // as part of the case, clear it (parser has cleared yychar).
+ // If the last token read by the lexer was the lookahead
+ // leave it alone (parser has it cached in yychar).
+ // This is so that the stmt_list action doesn't look at
+ // the case tokens if the stmt_list is empty.
+ yylast = yychar
+ yyDollar[1].node.Xoffset = int64(block)
+ }
+ case 62:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:572
+ {
+ // This is the only place in the language where a statement
+ // list is not allowed to drop the final semicolon, because
+ // it's the only place where a statement list is not followed
+ // by a closing brace. Handle the error for pedantry.
+
+ // Find the final token of the statement list.
+ // yylast is lookahead; yyprev is last of stmt_list
+ last := yyprev
+
+ if last > 0 && last != ';' && yychar != '}' {
+ Yyerror("missing statement after label")
+ }
+ yyVAL.node = yyDollar[1].node
+ yyVAL.node.Nbody = yyDollar[3].list
+ popdcl()
+ }
+ case 63:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:591
+ {
+ yyVAL.list = nil
+ }
+ case 64:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:595
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[2].node)
+ }
+ case 65:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:601
+ {
+ markdcl()
+ }
+ case 66:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:605
+ {
+ yyVAL.list = yyDollar[3].list
+ popdcl()
+ }
+ case 67:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:612
+ {
+ yyVAL.node = Nod(ORANGE, nil, yyDollar[4].node)
+ yyVAL.node.List = yyDollar[1].list
+ yyVAL.node.Etype = 0 // := flag
+ }
+ case 68:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:618
+ {
+ yyVAL.node = Nod(ORANGE, nil, yyDollar[4].node)
+ yyVAL.node.List = yyDollar[1].list
+ yyVAL.node.Colas = 1
+ colasdefn(yyDollar[1].list, yyVAL.node)
+ }
+ case 69:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:625
+ {
+ yyVAL.node = Nod(ORANGE, nil, yyDollar[2].node)
+ yyVAL.node.Etype = 0 // := flag
+ }
+ case 70:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:632
+ {
+ // init ; test ; incr
+ if yyDollar[5].node != nil && yyDollar[5].node.Colas != 0 {
+ Yyerror("cannot declare in the for-increment")
+ }
+ yyVAL.node = Nod(OFOR, nil, nil)
+ if yyDollar[1].node != nil {
+ yyVAL.node.Ninit = list1(yyDollar[1].node)
+ }
+ yyVAL.node.Ntest = yyDollar[3].node
+ yyVAL.node.Nincr = yyDollar[5].node
+ }
+ case 71:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:645
+ {
+ // normal test
+ yyVAL.node = Nod(OFOR, nil, nil)
+ yyVAL.node.Ntest = yyDollar[1].node
+ }
+ case 72:
+ yyVAL.node = yyS[yypt-0].node
+ case 73:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:654
+ {
+ yyVAL.node = yyDollar[1].node
+ yyVAL.node.Nbody = concat(yyVAL.node.Nbody, yyDollar[2].list)
+ }
+ case 74:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:661
+ {
+ markdcl()
+ }
+ case 75:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:665
+ {
+ yyVAL.node = yyDollar[3].node
+ popdcl()
+ }
+ case 76:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:672
+ {
+ // test
+ yyVAL.node = Nod(OIF, nil, nil)
+ yyVAL.node.Ntest = yyDollar[1].node
+ }
+ case 77:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:678
+ {
+ // init ; test
+ yyVAL.node = Nod(OIF, nil, nil)
+ if yyDollar[1].node != nil {
+ yyVAL.node.Ninit = list1(yyDollar[1].node)
+ }
+ yyVAL.node.Ntest = yyDollar[3].node
+ }
+ case 78:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:690
+ {
+ markdcl()
+ }
+ case 79:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:694
+ {
+ if yyDollar[3].node.Ntest == nil {
+ Yyerror("missing condition in if statement")
+ }
+ }
+ case 80:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:700
+ {
+ yyDollar[3].node.Nbody = yyDollar[5].list
+ }
+ case 81:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line go.y:704
+ {
+ var n *Node
+ var nn *NodeList
+
+ yyVAL.node = yyDollar[3].node
+ n = yyDollar[3].node
+ popdcl()
+ for nn = concat(yyDollar[7].list, yyDollar[8].list); nn != nil; nn = nn.Next {
+ if nn.N.Op == OIF {
+ popdcl()
+ }
+ n.Nelse = list1(nn.N)
+ n = nn.N
+ }
+ }
+ case 82:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:722
+ {
+ markdcl()
+ }
+ case 83:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:726
+ {
+ if yyDollar[4].node.Ntest == nil {
+ Yyerror("missing condition in if statement")
+ }
+ yyDollar[4].node.Nbody = yyDollar[5].list
+ yyVAL.list = list1(yyDollar[4].node)
+ }
+ case 84:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:735
+ {
+ yyVAL.list = nil
+ }
+ case 85:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:739
+ {
+ yyVAL.list = concat(yyDollar[1].list, yyDollar[2].list)
+ }
+ case 86:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:744
+ {
+ yyVAL.list = nil
+ }
+ case 87:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:748
+ {
+ l := &NodeList{N: yyDollar[2].node}
+ l.End = l
+ yyVAL.list = l
+ }
+ case 88:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:756
+ {
+ markdcl()
+ }
+ case 89:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:760
+ {
+ var n *Node
+ n = yyDollar[3].node.Ntest
+ if n != nil && n.Op != OTYPESW {
+ n = nil
+ }
+ typesw = Nod(OXXX, typesw, n)
+ }
+ case 90:
+ yyDollar = yyS[yypt-7 : yypt+1]
+ //line go.y:769
+ {
+ yyVAL.node = yyDollar[3].node
+ yyVAL.node.Op = OSWITCH
+ yyVAL.node.List = yyDollar[6].list
+ typesw = typesw.Left
+ popdcl()
+ }
+ case 91:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:779
+ {
+ typesw = Nod(OXXX, typesw, nil)
+ }
+ case 92:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:783
+ {
+ yyVAL.node = Nod(OSELECT, nil, nil)
+ yyVAL.node.Lineno = typesw.Lineno
+ yyVAL.node.List = yyDollar[4].list
+ typesw = typesw.Left
+ }
+ case 93:
+ yyVAL.node = yyS[yypt-0].node
+ case 94:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:796
+ {
+ yyVAL.node = Nod(OOROR, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 95:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:800
+ {
+ yyVAL.node = Nod(OANDAND, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 96:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:804
+ {
+ yyVAL.node = Nod(OEQ, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 97:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:808
+ {
+ yyVAL.node = Nod(ONE, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 98:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:812
+ {
+ yyVAL.node = Nod(OLT, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 99:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:816
+ {
+ yyVAL.node = Nod(OLE, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 100:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:820
+ {
+ yyVAL.node = Nod(OGE, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 101:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:824
+ {
+ yyVAL.node = Nod(OGT, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 102:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:828
+ {
+ yyVAL.node = Nod(OADD, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 103:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:832
+ {
+ yyVAL.node = Nod(OSUB, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 104:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:836
+ {
+ yyVAL.node = Nod(OOR, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 105:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:840
+ {
+ yyVAL.node = Nod(OXOR, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 106:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:844
+ {
+ yyVAL.node = Nod(OMUL, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 107:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:848
+ {
+ yyVAL.node = Nod(ODIV, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 108:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:852
+ {
+ yyVAL.node = Nod(OMOD, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 109:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:856
+ {
+ yyVAL.node = Nod(OAND, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 110:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:860
+ {
+ yyVAL.node = Nod(OANDNOT, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 111:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:864
+ {
+ yyVAL.node = Nod(OLSH, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 112:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:868
+ {
+ yyVAL.node = Nod(ORSH, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 113:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:873
+ {
+ yyVAL.node = Nod(OSEND, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 114:
+ yyVAL.node = yyS[yypt-0].node
+ case 115:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:880
+ {
+ yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
+ }
+ case 116:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:884
+ {
+ if yyDollar[2].node.Op == OCOMPLIT {
+ // Special case for &T{...}: turn into (*T){...}.
+ yyVAL.node = yyDollar[2].node
+ yyVAL.node.Right = Nod(OIND, yyVAL.node.Right, nil)
+ yyVAL.node.Right.Implicit = 1
+ } else {
+ yyVAL.node = Nod(OADDR, yyDollar[2].node, nil)
+ }
+ }
+ case 117:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:895
+ {
+ yyVAL.node = Nod(OPLUS, yyDollar[2].node, nil)
+ }
+ case 118:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:899
+ {
+ yyVAL.node = Nod(OMINUS, yyDollar[2].node, nil)
+ }
+ case 119:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:903
+ {
+ yyVAL.node = Nod(ONOT, yyDollar[2].node, nil)
+ }
+ case 120:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:907
+ {
+ Yyerror("the bitwise complement operator is ^")
+ yyVAL.node = Nod(OCOM, yyDollar[2].node, nil)
+ }
+ case 121:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:912
+ {
+ yyVAL.node = Nod(OCOM, yyDollar[2].node, nil)
+ }
+ case 122:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:916
+ {
+ yyVAL.node = Nod(ORECV, yyDollar[2].node, nil)
+ }
+ case 123:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:926
+ {
+ yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+ }
+ case 124:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:930
+ {
+ yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+ yyVAL.node.List = yyDollar[3].list
+ }
+ case 125:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line go.y:935
+ {
+ yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+ yyVAL.node.List = yyDollar[3].list
+ yyVAL.node.Isddd = 1
+ }
+ case 126:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:943
+ {
+ yyVAL.node = nodlit(yyDollar[1].val)
+ }
+ case 127:
+ yyVAL.node = yyS[yypt-0].node
+ case 128:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:948
+ {
+ if yyDollar[1].node.Op == OPACK {
+ var s *Sym
+ s = restrictlookup(yyDollar[3].sym.Name, yyDollar[1].node.Pkg)
+ yyDollar[1].node.Used = 1
+ yyVAL.node = oldname(s)
+ break
+ }
+ yyVAL.node = Nod(OXDOT, yyDollar[1].node, newname(yyDollar[3].sym))
+ }
+ case 129:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:959
+ {
+ yyVAL.node = Nod(ODOTTYPE, yyDollar[1].node, yyDollar[4].node)
+ }
+ case 130:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:963
+ {
+ yyVAL.node = Nod(OTYPESW, nil, yyDollar[1].node)
+ }
+ case 131:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:967
+ {
+ yyVAL.node = Nod(OINDEX, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 132:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line go.y:971
+ {
+ yyVAL.node = Nod(OSLICE, yyDollar[1].node, Nod(OKEY, yyDollar[3].node, yyDollar[5].node))
+ }
+ case 133:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line go.y:975
+ {
+ if yyDollar[5].node == nil {
+ Yyerror("middle index required in 3-index slice")
+ }
+ if yyDollar[7].node == nil {
+ Yyerror("final index required in 3-index slice")
+ }
+ yyVAL.node = Nod(OSLICE3, yyDollar[1].node, Nod(OKEY, yyDollar[3].node, Nod(OKEY, yyDollar[5].node, yyDollar[7].node)))
+ }
+ case 134:
+ yyVAL.node = yyS[yypt-0].node
+ case 135:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:986
+ {
+ // conversion
+ yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+ yyVAL.node.List = list1(yyDollar[3].node)
+ }
+ case 136:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:992
+ {
+ yyVAL.node = yyDollar[3].node
+ yyVAL.node.Right = yyDollar[1].node
+ yyVAL.node.List = yyDollar[4].list
+ fixlbrace(yyDollar[2].i)
+ }
+ case 137:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:999
+ {
+ yyVAL.node = yyDollar[3].node
+ yyVAL.node.Right = yyDollar[1].node
+ yyVAL.node.List = yyDollar[4].list
+ }
+ case 138:
+ yyDollar = yyS[yypt-7 : yypt+1]
+ //line go.y:1005
+ {
+ Yyerror("cannot parenthesize type in composite literal")
+ yyVAL.node = yyDollar[5].node
+ yyVAL.node.Right = yyDollar[2].node
+ yyVAL.node.List = yyDollar[6].list
+ }
+ case 139:
+ yyVAL.node = yyS[yypt-0].node
+ case 140:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1014
+ {
+ // composite expression.
+ // make node early so we get the right line number.
+ yyVAL.node = Nod(OCOMPLIT, nil, nil)
+ }
+ case 141:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1022
+ {
+ yyVAL.node = Nod(OKEY, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 142:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1028
+ {
+ // These nodes do not carry line numbers.
+ // Since a composite literal commonly spans several lines,
+ // the line number on errors may be misleading.
+ // Introduce a wrapper node to give the correct line.
+ yyVAL.node = yyDollar[1].node
+ switch yyVAL.node.Op {
+ case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+ yyVAL.node = Nod(OPAREN, yyVAL.node, nil)
+ yyVAL.node.Implicit = 1
+ }
+ }
+ case 143:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1041
+ {
+ yyVAL.node = yyDollar[2].node
+ yyVAL.node.List = yyDollar[3].list
+ }
+ case 144:
+ yyVAL.node = yyS[yypt-0].node
+ case 145:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1049
+ {
+ yyVAL.node = yyDollar[2].node
+ yyVAL.node.List = yyDollar[3].list
+ }
+ case 146:
+ yyVAL.node = yyS[yypt-0].node
+ case 147:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1057
+ {
+ yyVAL.node = yyDollar[2].node
+
+ // Need to know on lhs of := whether there are ( ).
+ // Don't bother with the OPAREN in other cases:
+ // it's just a waste of memory and time.
+ switch yyVAL.node.Op {
+ case ONAME, ONONAME, OPACK, OTYPE, OLITERAL, OTYPESW:
+ yyVAL.node = Nod(OPAREN, yyVAL.node, nil)
+ }
+ }
+ case 148:
+ yyVAL.node = yyS[yypt-0].node
+ case 149:
+ yyVAL.node = yyS[yypt-0].node
+ case 150:
+ yyVAL.node = yyS[yypt-0].node
+ case 151:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1078
+ {
+ yyVAL.i = LBODY
+ }
+ case 152:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1082
+ {
+ yyVAL.i = '{'
+ }
+ case 153:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1093
+ {
+ if yyDollar[1].sym == nil {
+ yyVAL.node = nil
+ } else {
+ yyVAL.node = newname(yyDollar[1].sym)
+ }
+ }
+ case 154:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1103
+ {
+ yyVAL.node = dclname(yyDollar[1].sym)
+ }
+ case 155:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1108
+ {
+ yyVAL.node = nil
+ }
+ case 156:
+ yyVAL.node = yyS[yypt-0].node
+ case 157:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1115
+ {
+ yyVAL.sym = yyDollar[1].sym
+ // during imports, unqualified non-exported identifiers are from builtinpkg
+ if importpkg != nil && !exportname(yyDollar[1].sym.Name) {
+ yyVAL.sym = Pkglookup(yyDollar[1].sym.Name, builtinpkg)
+ }
+ }
+ case 158:
+ yyVAL.sym = yyS[yypt-0].sym
+ case 159:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1124
+ {
+ yyVAL.sym = nil
+ }
+ case 160:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1130
+ {
+ var p *Pkg
+
+ if yyDollar[2].val.U.Sval.S == "" {
+ p = importpkg
+ } else {
+ if isbadimport(yyDollar[2].val.U.Sval) {
+ errorexit()
+ }
+ p = mkpkg(yyDollar[2].val.U.Sval)
+ }
+ yyVAL.sym = Pkglookup(yyDollar[4].sym.Name, p)
+ }
+ case 161:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1144
+ {
+ var p *Pkg
+
+ if yyDollar[2].val.U.Sval.S == "" {
+ p = importpkg
+ } else {
+ if isbadimport(yyDollar[2].val.U.Sval) {
+ errorexit()
+ }
+ p = mkpkg(yyDollar[2].val.U.Sval)
+ }
+ yyVAL.sym = Pkglookup("?", p)
+ }
+ case 162:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1160
+ {
+ yyVAL.node = oldname(yyDollar[1].sym)
+ if yyVAL.node.Pack != nil {
+ yyVAL.node.Pack.Used = 1
+ }
+ }
+ case 163:
+ yyVAL.node = yyS[yypt-0].node
+ case 164:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1181
+ {
+ Yyerror("final argument in variadic function missing type")
+ yyVAL.node = Nod(ODDD, typenod(typ(TINTER)), nil)
+ }
+ case 165:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1186
+ {
+ yyVAL.node = Nod(ODDD, yyDollar[2].node, nil)
+ }
+ case 166:
+ yyVAL.node = yyS[yypt-0].node
+ case 167:
+ yyVAL.node = yyS[yypt-0].node
+ case 168:
+ yyVAL.node = yyS[yypt-0].node
+ case 169:
+ yyVAL.node = yyS[yypt-0].node
+ case 170:
+ yyVAL.node = yyS[yypt-0].node
+ case 171:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1197
+ {
+ yyVAL.node = yyDollar[2].node
+ }
+ case 172:
+ yyVAL.node = yyS[yypt-0].node
+ case 173:
+ yyVAL.node = yyS[yypt-0].node
+ case 174:
+ yyVAL.node = yyS[yypt-0].node
+ case 175:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1206
+ {
+ yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
+ }
+ case 176:
+ yyVAL.node = yyS[yypt-0].node
+ case 177:
+ yyVAL.node = yyS[yypt-0].node
+ case 178:
+ yyVAL.node = yyS[yypt-0].node
+ case 179:
+ yyVAL.node = yyS[yypt-0].node
+ case 180:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1216
+ {
+ yyVAL.node = yyDollar[2].node
+ }
+ case 181:
+ yyVAL.node = yyS[yypt-0].node
+ case 182:
+ yyVAL.node = yyS[yypt-0].node
+ case 183:
+ yyVAL.node = yyS[yypt-0].node
+ case 184:
+ yyVAL.node = yyS[yypt-0].node
+ case 185:
+ yyVAL.node = yyS[yypt-0].node
+ case 186:
+ yyVAL.node = yyS[yypt-0].node
+ case 187:
+ yyVAL.node = yyS[yypt-0].node
+ case 188:
+ yyVAL.node = yyS[yypt-0].node
+ case 189:
+ yyVAL.node = yyS[yypt-0].node
+ case 190:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1237
+ {
+ if yyDollar[1].node.Op == OPACK {
+ var s *Sym
+ s = restrictlookup(yyDollar[3].sym.Name, yyDollar[1].node.Pkg)
+ yyDollar[1].node.Used = 1
+ yyVAL.node = oldname(s)
+ break
+ }
+ yyVAL.node = Nod(OXDOT, yyDollar[1].node, newname(yyDollar[3].sym))
+ }
+ case 191:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1250
+ {
+ yyVAL.node = Nod(OTARRAY, yyDollar[2].node, yyDollar[4].node)
+ }
+ case 192:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1254
+ {
+ // array literal of nelem
+ yyVAL.node = Nod(OTARRAY, Nod(ODDD, nil, nil), yyDollar[4].node)
+ }
+ case 193:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1259
+ {
+ yyVAL.node = Nod(OTCHAN, yyDollar[2].node, nil)
+ yyVAL.node.Etype = Cboth
+ }
+ case 194:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1264
+ {
+ yyVAL.node = Nod(OTCHAN, yyDollar[3].node, nil)
+ yyVAL.node.Etype = Csend
+ }
+ case 195:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1269
+ {
+ yyVAL.node = Nod(OTMAP, yyDollar[3].node, yyDollar[5].node)
+ }
+ case 196:
+ yyVAL.node = yyS[yypt-0].node
+ case 197:
+ yyVAL.node = yyS[yypt-0].node
+ case 198:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1277
+ {
+ yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
+ }
+ case 199:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1283
+ {
+ yyVAL.node = Nod(OTCHAN, yyDollar[3].node, nil)
+ yyVAL.node.Etype = Crecv
+ }
+ case 200:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1290
+ {
+ yyVAL.node = Nod(OTSTRUCT, nil, nil)
+ yyVAL.node.List = yyDollar[3].list
+ fixlbrace(yyDollar[2].i)
+ }
+ case 201:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1296
+ {
+ yyVAL.node = Nod(OTSTRUCT, nil, nil)
+ fixlbrace(yyDollar[2].i)
+ }
+ case 202:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1303
+ {
+ yyVAL.node = Nod(OTINTER, nil, nil)
+ yyVAL.node.List = yyDollar[3].list
+ fixlbrace(yyDollar[2].i)
+ }
+ case 203:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1309
+ {
+ yyVAL.node = Nod(OTINTER, nil, nil)
+ fixlbrace(yyDollar[2].i)
+ }
+ case 204:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1320
+ {
+ yyVAL.node = yyDollar[2].node
+ if yyVAL.node == nil {
+ break
+ }
+ if noescape && yyDollar[3].list != nil {
+ Yyerror("can only use //go:noescape with external func implementations")
+ }
+ yyVAL.node.Nbody = yyDollar[3].list
+ yyVAL.node.Endlineno = lineno
+ yyVAL.node.Noescape = noescape
+ yyVAL.node.Nosplit = nosplit
+ yyVAL.node.Nowritebarrier = nowritebarrier
+ funcbody(yyVAL.node)
+ }
+ case 205:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1338
+ {
+ var t *Node
+
+ yyVAL.node = nil
+ yyDollar[3].list = checkarglist(yyDollar[3].list, 1)
+
+ if yyDollar[1].sym.Name == "init" {
+ yyDollar[1].sym = renameinit()
+ if yyDollar[3].list != nil || yyDollar[5].list != nil {
+ Yyerror("func init must have no arguments and no return values")
+ }
+ }
+ if localpkg.Name == "main" && yyDollar[1].sym.Name == "main" {
+ if yyDollar[3].list != nil || yyDollar[5].list != nil {
+ Yyerror("func main must have no arguments and no return values")
+ }
+ }
+
+ t = Nod(OTFUNC, nil, nil)
+ t.List = yyDollar[3].list
+ t.Rlist = yyDollar[5].list
+
+ yyVAL.node = Nod(ODCLFUNC, nil, nil)
+ yyVAL.node.Nname = newname(yyDollar[1].sym)
+ yyVAL.node.Nname.Defn = yyVAL.node
+ yyVAL.node.Nname.Ntype = t // TODO: check if nname already has an ntype
+ declare(yyVAL.node.Nname, PFUNC)
+
+ funchdr(yyVAL.node)
+ }
+ case 206:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line go.y:1369
+ {
+ var rcvr, t *Node
+
+ yyVAL.node = nil
+ yyDollar[2].list = checkarglist(yyDollar[2].list, 0)
+ yyDollar[6].list = checkarglist(yyDollar[6].list, 1)
+
+ if yyDollar[2].list == nil {
+ Yyerror("method has no receiver")
+ break
+ }
+ if yyDollar[2].list.Next != nil {
+ Yyerror("method has multiple receivers")
+ break
+ }
+ rcvr = yyDollar[2].list.N
+ if rcvr.Op != ODCLFIELD {
+ Yyerror("bad receiver in method")
+ break
+ }
+
+ t = Nod(OTFUNC, rcvr, nil)
+ t.List = yyDollar[6].list
+ t.Rlist = yyDollar[8].list
+
+ yyVAL.node = Nod(ODCLFUNC, nil, nil)
+ yyVAL.node.Shortname = newname(yyDollar[4].sym)
+ yyVAL.node.Nname = methodname1(yyVAL.node.Shortname, rcvr.Right)
+ yyVAL.node.Nname.Defn = yyVAL.node
+ yyVAL.node.Nname.Ntype = t
+ yyVAL.node.Nname.Nointerface = nointerface
+ declare(yyVAL.node.Nname, PFUNC)
+
+ funchdr(yyVAL.node)
+ }
+ case 207:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1407
+ {
+ var s *Sym
+ var t *Type
+
+ yyVAL.node = nil
+
+ s = yyDollar[1].sym
+ t = functype(nil, yyDollar[3].list, yyDollar[5].list)
+
+ importsym(s, ONAME)
+ if s.Def != nil && s.Def.Op == ONAME {
+ if Eqtype(t, s.Def.Type) {
+ dclcontext = PDISCARD // since we skip funchdr below
+ break
+ }
+ Yyerror("inconsistent definition for func %v during import\n\t%v\n\t%v", Sconv(s, 0), Tconv(s.Def.Type, 0), Tconv(t, 0))
+ }
+
+ yyVAL.node = newname(s)
+ yyVAL.node.Type = t
+ declare(yyVAL.node, PFUNC)
+
+ funchdr(yyVAL.node)
+ }
+ case 208:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line go.y:1432
+ {
+ yyVAL.node = methodname1(newname(yyDollar[4].sym), yyDollar[2].list.N.Right)
+ yyVAL.node.Type = functype(yyDollar[2].list.N, yyDollar[6].list, yyDollar[8].list)
+
+ checkwidth(yyVAL.node.Type)
+ addmethod(yyDollar[4].sym, yyVAL.node.Type, false, nointerface)
+ nointerface = false
+ funchdr(yyVAL.node)
+
+	// inl.c's inlnode on a dotmeth node expects to find the inlineable body as
+	// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
+	// out by typecheck's lookdot as this $$.ttype. So by providing
+	// this back link here we avoid special-casing there.
+ yyVAL.node.Type.Nname = yyVAL.node
+ }
+ case 209:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1450
+ {
+ yyDollar[3].list = checkarglist(yyDollar[3].list, 1)
+ yyVAL.node = Nod(OTFUNC, nil, nil)
+ yyVAL.node.List = yyDollar[3].list
+ yyVAL.node.Rlist = yyDollar[5].list
+ }
+ case 210:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1458
+ {
+ yyVAL.list = nil
+ }
+ case 211:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1462
+ {
+ yyVAL.list = yyDollar[2].list
+ if yyVAL.list == nil {
+ yyVAL.list = list1(Nod(OEMPTY, nil, nil))
+ }
+ }
+ case 212:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1471
+ {
+ yyVAL.list = nil
+ }
+ case 213:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1475
+ {
+ yyVAL.list = list1(Nod(ODCLFIELD, nil, yyDollar[1].node))
+ }
+ case 214:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1479
+ {
+ yyDollar[2].list = checkarglist(yyDollar[2].list, 0)
+ yyVAL.list = yyDollar[2].list
+ }
+ case 215:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1486
+ {
+ closurehdr(yyDollar[1].node)
+ }
+ case 216:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1492
+ {
+ yyVAL.node = closurebody(yyDollar[3].list)
+ fixlbrace(yyDollar[2].i)
+ }
+ case 217:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1497
+ {
+ yyVAL.node = closurebody(nil)
+ }
+ case 218:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1508
+ {
+ yyVAL.list = nil
+ }
+ case 219:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1512
+ {
+ yyVAL.list = concat(yyDollar[1].list, yyDollar[2].list)
+ if nsyntaxerrors == 0 {
+ testdclstack()
+ }
+ nointerface = false
+ noescape = false
+ nosplit = false
+ nowritebarrier = false
+ }
+ case 220:
+ yyVAL.list = yyS[yypt-0].list
+ case 221:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1526
+ {
+ yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
+ }
+ case 222:
+ yyVAL.list = yyS[yypt-0].list
+ case 223:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1533
+ {
+ yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
+ }
+ case 224:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1539
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 225:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1543
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 226:
+ yyVAL.list = yyS[yypt-0].list
+ case 227:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1550
+ {
+ yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
+ }
+ case 228:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1556
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 229:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1560
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 230:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1566
+ {
+ var l *NodeList
+
+ var n *Node
+ l = yyDollar[1].list
+ if l == nil {
+	// '?' symbol during import (list1(nil) == nil)
+ n = yyDollar[2].node
+ if n.Op == OIND {
+ n = n.Left
+ }
+ n = embedded(n.Sym, importpkg)
+ n.Right = yyDollar[2].node
+ n.Val = yyDollar[3].val
+ yyVAL.list = list1(n)
+ break
+ }
+
+ for l = yyDollar[1].list; l != nil; l = l.Next {
+ l.N = Nod(ODCLFIELD, l.N, yyDollar[2].node)
+ l.N.Val = yyDollar[3].val
+ }
+ }
+ case 231:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1590
+ {
+ yyDollar[1].node.Val = yyDollar[2].val
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 232:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1595
+ {
+ yyDollar[2].node.Val = yyDollar[4].val
+ yyVAL.list = list1(yyDollar[2].node)
+ Yyerror("cannot parenthesize embedded type")
+ }
+ case 233:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1601
+ {
+ yyDollar[2].node.Right = Nod(OIND, yyDollar[2].node.Right, nil)
+ yyDollar[2].node.Val = yyDollar[3].val
+ yyVAL.list = list1(yyDollar[2].node)
+ }
+ case 234:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1607
+ {
+ yyDollar[3].node.Right = Nod(OIND, yyDollar[3].node.Right, nil)
+ yyDollar[3].node.Val = yyDollar[5].val
+ yyVAL.list = list1(yyDollar[3].node)
+ Yyerror("cannot parenthesize embedded type")
+ }
+ case 235:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1614
+ {
+ yyDollar[3].node.Right = Nod(OIND, yyDollar[3].node.Right, nil)
+ yyDollar[3].node.Val = yyDollar[5].val
+ yyVAL.list = list1(yyDollar[3].node)
+ Yyerror("cannot parenthesize embedded type")
+ }
+ case 236:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1623
+ {
+ var n *Node
+
+ yyVAL.sym = yyDollar[1].sym
+ n = oldname(yyDollar[1].sym)
+ if n.Pack != nil {
+ n.Pack.Used = 1
+ }
+ }
+ case 237:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1633
+ {
+ var pkg *Pkg
+
+ if yyDollar[1].sym.Def == nil || yyDollar[1].sym.Def.Op != OPACK {
+ Yyerror("%v is not a package", Sconv(yyDollar[1].sym, 0))
+ pkg = localpkg
+ } else {
+ yyDollar[1].sym.Def.Used = 1
+ pkg = yyDollar[1].sym.Def.Pkg
+ }
+ yyVAL.sym = restrictlookup(yyDollar[3].sym.Name, pkg)
+ }
+ case 238:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1648
+ {
+ yyVAL.node = embedded(yyDollar[1].sym, localpkg)
+ }
+ case 239:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1654
+ {
+ yyVAL.node = Nod(ODCLFIELD, yyDollar[1].node, yyDollar[2].node)
+ ifacedcl(yyVAL.node)
+ }
+ case 240:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1659
+ {
+ yyVAL.node = Nod(ODCLFIELD, nil, oldname(yyDollar[1].sym))
+ }
+ case 241:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1663
+ {
+ yyVAL.node = Nod(ODCLFIELD, nil, oldname(yyDollar[2].sym))
+ Yyerror("cannot parenthesize embedded type")
+ }
+ case 242:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1670
+ {
+ // without func keyword
+ yyDollar[2].list = checkarglist(yyDollar[2].list, 1)
+ yyVAL.node = Nod(OTFUNC, fakethis(), nil)
+ yyVAL.node.List = yyDollar[2].list
+ yyVAL.node.Rlist = yyDollar[4].list
+ }
+ case 243:
+ yyVAL.node = yyS[yypt-0].node
+ case 244:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1684
+ {
+ yyVAL.node = Nod(ONONAME, nil, nil)
+ yyVAL.node.Sym = yyDollar[1].sym
+ yyVAL.node = Nod(OKEY, yyVAL.node, yyDollar[2].node)
+ }
+ case 245:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1690
+ {
+ yyVAL.node = Nod(ONONAME, nil, nil)
+ yyVAL.node.Sym = yyDollar[1].sym
+ yyVAL.node = Nod(OKEY, yyVAL.node, yyDollar[2].node)
+ }
+ case 246:
+ yyVAL.node = yyS[yypt-0].node
+ case 247:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1699
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 248:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1703
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 249:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1708
+ {
+ yyVAL.list = nil
+ }
+ case 250:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1712
+ {
+ yyVAL.list = yyDollar[1].list
+ }
+ case 251:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1720
+ {
+ yyVAL.node = nil
+ }
+ case 252:
+ yyVAL.node = yyS[yypt-0].node
+ case 253:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1725
+ {
+ yyVAL.node = liststmt(yyDollar[1].list)
+ }
+ case 254:
+ yyVAL.node = yyS[yypt-0].node
+ case 255:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1730
+ {
+ yyVAL.node = nil
+ }
+ case 256:
+ yyVAL.node = yyS[yypt-0].node
+ case 257:
+ yyVAL.node = yyS[yypt-0].node
+ case 258:
+ yyVAL.node = yyS[yypt-0].node
+ case 259:
+ yyVAL.node = yyS[yypt-0].node
+ case 260:
+ yyVAL.node = yyS[yypt-0].node
+ case 261:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1741
+ {
+ yyDollar[1].node = Nod(OLABEL, yyDollar[1].node, nil)
+ yyDollar[1].node.Sym = dclstack // context, for goto restrictions
+ }
+ case 262:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1746
+ {
+ var l *NodeList
+
+ yyDollar[1].node.Defn = yyDollar[4].node
+ l = list1(yyDollar[1].node)
+ if yyDollar[4].node != nil {
+ l = list(l, yyDollar[4].node)
+ }
+ yyVAL.node = liststmt(l)
+ }
+ case 263:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1757
+ {
+ // will be converted to OFALL
+ yyVAL.node = Nod(OXFALL, nil, nil)
+ yyVAL.node.Xoffset = int64(block)
+ }
+ case 264:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1763
+ {
+ yyVAL.node = Nod(OBREAK, yyDollar[2].node, nil)
+ }
+ case 265:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1767
+ {
+ yyVAL.node = Nod(OCONTINUE, yyDollar[2].node, nil)
+ }
+ case 266:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1771
+ {
+ yyVAL.node = Nod(OPROC, yyDollar[2].node, nil)
+ }
+ case 267:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1775
+ {
+ yyVAL.node = Nod(ODEFER, yyDollar[2].node, nil)
+ }
+ case 268:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1779
+ {
+ yyVAL.node = Nod(OGOTO, yyDollar[2].node, nil)
+ yyVAL.node.Sym = dclstack // context, for goto restrictions
+ }
+ case 269:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1784
+ {
+ yyVAL.node = Nod(ORETURN, nil, nil)
+ yyVAL.node.List = yyDollar[2].list
+ if yyVAL.node.List == nil && Curfn != nil {
+ var l *NodeList
+
+ for l = Curfn.Dcl; l != nil; l = l.Next {
+ if l.N.Class == PPARAM {
+ continue
+ }
+ if l.N.Class != PPARAMOUT {
+ break
+ }
+ if l.N.Sym.Def != l.N {
+ Yyerror("%s is shadowed during return", l.N.Sym.Name)
+ }
+ }
+ }
+ }
+ case 270:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1806
+ {
+ yyVAL.list = nil
+ if yyDollar[1].node != nil {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ }
+ case 271:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1813
+ {
+ yyVAL.list = yyDollar[1].list
+ if yyDollar[3].node != nil {
+ yyVAL.list = list(yyVAL.list, yyDollar[3].node)
+ }
+ }
+ case 272:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1822
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 273:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1826
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 274:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1832
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 275:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1836
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 276:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1842
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 277:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1846
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 278:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1852
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 279:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1856
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 280:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1865
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 281:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1869
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 282:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1873
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 283:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1877
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 284:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1882
+ {
+ yyVAL.list = nil
+ }
+ case 285:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1886
+ {
+ yyVAL.list = yyDollar[1].list
+ }
+ case 290:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1900
+ {
+ yyVAL.node = nil
+ }
+ case 291:
+ yyVAL.node = yyS[yypt-0].node
+ case 292:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1906
+ {
+ yyVAL.list = nil
+ }
+ case 293:
+ yyVAL.list = yyS[yypt-0].list
+ case 294:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1912
+ {
+ yyVAL.node = nil
+ }
+ case 295:
+ yyVAL.node = yyS[yypt-0].node
+ case 296:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1918
+ {
+ yyVAL.list = nil
+ }
+ case 297:
+ yyVAL.list = yyS[yypt-0].list
+ case 298:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1924
+ {
+ yyVAL.list = nil
+ }
+ case 299:
+ yyVAL.list = yyS[yypt-0].list
+ case 300:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1930
+ {
+ yyVAL.list = nil
+ }
+ case 301:
+ yyVAL.list = yyS[yypt-0].list
+ case 302:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1936
+ {
+ yyVAL.val.Ctype = CTxxx
+ }
+ case 303:
+ yyVAL.val = yyS[yypt-0].val
+ case 304:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1946
+ {
+ importimport(yyDollar[2].sym, yyDollar[3].val.U.Sval)
+ }
+ case 305:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1950
+ {
+ importvar(yyDollar[2].sym, yyDollar[3].typ)
+ }
+ case 306:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1954
+ {
+ importconst(yyDollar[2].sym, Types[TIDEAL], yyDollar[4].node)
+ }
+ case 307:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line go.y:1958
+ {
+ importconst(yyDollar[2].sym, yyDollar[3].typ, yyDollar[5].node)
+ }
+ case 308:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1962
+ {
+ importtype(yyDollar[2].typ, yyDollar[3].typ)
+ }
+ case 309:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1966
+ {
+ if yyDollar[2].node == nil {
+ dclcontext = PEXTERN // since we skip the funcbody below
+ break
+ }
+
+ yyDollar[2].node.Inl = yyDollar[3].list
+
+ funcbody(yyDollar[2].node)
+ importlist = list(importlist, yyDollar[2].node)
+
+ if Debug['E'] > 0 {
+ print("import [%v] func %lN \n", Zconv(importpkg.Path, 0), yyDollar[2].node)
+ if Debug['m'] > 2 && yyDollar[2].node.Inl != nil {
+ print("inl body:%+H\n", yyDollar[2].node.Inl)
+ }
+ }
+ }
+ case 310:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1987
+ {
+ yyVAL.sym = yyDollar[1].sym
+ structpkg = yyVAL.sym.Pkg
+ }
+ case 311:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1994
+ {
+ yyVAL.typ = pkgtype(yyDollar[1].sym)
+ importsym(yyDollar[1].sym, OTYPE)
+ }
+ case 312:
+ yyVAL.typ = yyS[yypt-0].typ
+ case 313:
+ yyVAL.typ = yyS[yypt-0].typ
+ case 314:
+ yyVAL.typ = yyS[yypt-0].typ
+ case 315:
+ yyVAL.typ = yyS[yypt-0].typ
+ case 316:
+ yyVAL.typ = yyS[yypt-0].typ
+ case 317:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2014
+ {
+ yyVAL.typ = pkgtype(yyDollar[1].sym)
+ }
+ case 318:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2018
+ {
+ // predefined name like uint8
+ yyDollar[1].sym = Pkglookup(yyDollar[1].sym.Name, builtinpkg)
+ if yyDollar[1].sym.Def == nil || yyDollar[1].sym.Def.Op != OTYPE {
+ Yyerror("%s is not a type", yyDollar[1].sym.Name)
+ yyVAL.typ = nil
+ } else {
+ yyVAL.typ = yyDollar[1].sym.Def.Type
+ }
+ }
+ case 319:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2029
+ {
+ yyVAL.typ = aindex(nil, yyDollar[3].typ)
+ }
+ case 320:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:2033
+ {
+ yyVAL.typ = aindex(nodlit(yyDollar[2].val), yyDollar[4].typ)
+ }
+ case 321:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:2037
+ {
+ yyVAL.typ = maptype(yyDollar[3].typ, yyDollar[5].typ)
+ }
+ case 322:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:2041
+ {
+ yyVAL.typ = tostruct(yyDollar[3].list)
+ }
+ case 323:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:2045
+ {
+ yyVAL.typ = tointerface(yyDollar[3].list)
+ }
+ case 324:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:2049
+ {
+ yyVAL.typ = Ptrto(yyDollar[2].typ)
+ }
+ case 325:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:2053
+ {
+ yyVAL.typ = typ(TCHAN)
+ yyVAL.typ.Type = yyDollar[2].typ
+ yyVAL.typ.Chan = Cboth
+ }
+ case 326:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:2059
+ {
+ yyVAL.typ = typ(TCHAN)
+ yyVAL.typ.Type = yyDollar[3].typ
+ yyVAL.typ.Chan = Cboth
+ }
+ case 327:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2065
+ {
+ yyVAL.typ = typ(TCHAN)
+ yyVAL.typ.Type = yyDollar[3].typ
+ yyVAL.typ.Chan = Csend
+ }
+ case 328:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2073
+ {
+ yyVAL.typ = typ(TCHAN)
+ yyVAL.typ.Type = yyDollar[3].typ
+ yyVAL.typ.Chan = Crecv
+ }
+ case 329:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:2081
+ {
+ yyVAL.typ = functype(nil, yyDollar[3].list, yyDollar[5].list)
+ }
+ case 330:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2087
+ {
+ yyVAL.node = Nod(ODCLFIELD, nil, typenod(yyDollar[2].typ))
+ if yyDollar[1].sym != nil {
+ yyVAL.node.Left = newname(yyDollar[1].sym)
+ }
+ yyVAL.node.Val = yyDollar[3].val
+ }
+ case 331:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:2095
+ {
+ var t *Type
+
+ t = typ(TARRAY)
+ t.Bound = -1
+ t.Type = yyDollar[3].typ
+
+ yyVAL.node = Nod(ODCLFIELD, nil, typenod(t))
+ if yyDollar[1].sym != nil {
+ yyVAL.node.Left = newname(yyDollar[1].sym)
+ }
+ yyVAL.node.Isddd = 1
+ yyVAL.node.Val = yyDollar[4].val
+ }
+ case 332:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2112
+ {
+ var s *Sym
+ var p *Pkg
+
+ if yyDollar[1].sym != nil && yyDollar[1].sym.Name != "?" {
+ yyVAL.node = Nod(ODCLFIELD, newname(yyDollar[1].sym), typenod(yyDollar[2].typ))
+ yyVAL.node.Val = yyDollar[3].val
+ } else {
+ s = yyDollar[2].typ.Sym
+ if s == nil && Isptr[yyDollar[2].typ.Etype] != 0 {
+ s = yyDollar[2].typ.Type.Sym
+ }
+ p = importpkg
+ if yyDollar[1].sym != nil {
+ p = yyDollar[1].sym.Pkg
+ }
+ yyVAL.node = embedded(s, p)
+ yyVAL.node.Right = typenod(yyDollar[2].typ)
+ yyVAL.node.Val = yyDollar[3].val
+ }
+ }
+ case 333:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:2136
+ {
+ yyVAL.node = Nod(ODCLFIELD, newname(yyDollar[1].sym), typenod(functype(fakethis(), yyDollar[3].list, yyDollar[5].list)))
+ }
+ case 334:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2140
+ {
+ yyVAL.node = Nod(ODCLFIELD, nil, typenod(yyDollar[1].typ))
+ }
+ case 335:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:2145
+ {
+ yyVAL.list = nil
+ }
+ case 336:
+ yyVAL.list = yyS[yypt-0].list
+ case 337:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2152
+ {
+ yyVAL.list = yyDollar[2].list
+ }
+ case 338:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2156
+ {
+ yyVAL.list = list1(Nod(ODCLFIELD, nil, typenod(yyDollar[1].typ)))
+ }
+ case 339:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2166
+ {
+ yyVAL.node = nodlit(yyDollar[1].val)
+ }
+ case 340:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:2170
+ {
+ yyVAL.node = nodlit(yyDollar[2].val)
+ switch yyVAL.node.Val.Ctype {
+ case CTINT, CTRUNE:
+ mpnegfix(yyVAL.node.Val.U.Xval)
+ break
+ case CTFLT:
+ mpnegflt(yyVAL.node.Val.U.Fval)
+ break
+ case CTCPLX:
+ mpnegflt(&yyVAL.node.Val.U.Cval.Real)
+ mpnegflt(&yyVAL.node.Val.U.Cval.Imag)
+ break
+ default:
+ Yyerror("bad negated constant")
+ }
+ }
+ case 341:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2188
+ {
+ yyVAL.node = oldname(Pkglookup(yyDollar[1].sym.Name, builtinpkg))
+ if yyVAL.node.Op != OLITERAL {
+ Yyerror("bad constant %v", Sconv(yyVAL.node.Sym, 0))
+ }
+ }
+ case 342:
+ yyVAL.node = yyS[yypt-0].node
+ case 343:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:2198
+ {
+ if yyDollar[2].node.Val.Ctype == CTRUNE && yyDollar[4].node.Val.Ctype == CTINT {
+ yyVAL.node = yyDollar[2].node
+ mpaddfixfix(yyDollar[2].node.Val.U.Xval, yyDollar[4].node.Val.U.Xval, 0)
+ break
+ }
+ yyDollar[4].node.Val.U.Cval.Real = yyDollar[4].node.Val.U.Cval.Imag
+ Mpmovecflt(&yyDollar[4].node.Val.U.Cval.Imag, 0.0)
+ yyVAL.node = nodcplxlit(yyDollar[2].node.Val, yyDollar[4].node.Val)
+ }
+ case 346:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2214
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 347:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2218
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 348:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2224
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 349:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2228
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 350:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2234
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 351:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2238
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ }
+ goto yystack /* stack new state and value */
+}
diff --git a/src/cmd/internal/gc/y.output b/src/cmd/internal/gc/y.output
new file mode 100644
index 0000000000..e4a5e5c212
--- /dev/null
+++ b/src/cmd/internal/gc/y.output
@@ -0,0 +1,10411 @@
+
+state 0
+ $accept: .file $end
+ $$4: . (4)
+
+ . reduce 4 (src line 148)
+
+ file goto 1
+ loadsys goto 2
+ $$4 goto 3
+
+state 1
+ $accept: file.$end
+
+ $end accept
+ . error
+
+
+state 2
+ file: loadsys.package imports xdcl_list
+ package: . (2)
+
+ LPACKAGE shift 5
+ . reduce 2 (src line 131)
+
+ package goto 4
+
+state 3
+ loadsys: $$4.import_package import_there
+
+ LPACKAGE shift 7
+ . error
+
+ import_package goto 6
+
+state 4
+ file: loadsys package.imports xdcl_list
+ imports: . (6)
+
+ . reduce 6 (src line 165)
+
+ imports goto 8
+
+state 5
+ package: LPACKAGE.sym ';'
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 9
+ hidden_importsym goto 11
+
+state 6
+ loadsys: $$4 import_package.import_there
+ $$21: . (21)
+
+ . reduce 21 (src line 272)
+
+ import_there goto 14
+ $$21 goto 15
+
+state 7
+ import_package: LPACKAGE.LNAME import_safety ';'
+
+ LNAME shift 16
+ . error
+
+
+state 8
+ file: loadsys package imports.xdcl_list
+ imports: imports.import ';'
+ xdcl_list: . (218)
+
+ LIMPORT shift 19
+ . reduce 218 (src line 1507)
+
+ xdcl_list goto 17
+ import goto 18
+
+state 9
+ package: LPACKAGE sym.';'
+
+ ';' shift 20
+ . error
+
+
+state 10
+ sym: LNAME. (157)
+
+ . reduce 157 (src line 1113)
+
+
+state 11
+ sym: hidden_importsym. (158)
+
+ . reduce 158 (src line 1122)
+
+
+state 12
+ sym: '?'. (159)
+
+ . reduce 159 (src line 1123)
+
+
+state 13
+ hidden_importsym: '@'.LLITERAL '.' LNAME
+ hidden_importsym: '@'.LLITERAL '.' '?'
+
+ LLITERAL shift 21
+ . error
+
+
+state 14
+ loadsys: $$4 import_package import_there. (5)
+
+ . reduce 5 (src line 159)
+
+
+state 15
+ import_there: $$21.hidden_import_list '$' '$'
+ hidden_import_list: . (344)
+
+ . reduce 344 (src line 2209)
+
+ hidden_import_list goto 22
+
+state 16
+ import_package: LPACKAGE LNAME.import_safety ';'
+ import_safety: . (19)
+
+ LNAME shift 24
+ . reduce 19 (src line 264)
+
+ import_safety goto 23
+
+state 17
+ file: loadsys package imports xdcl_list. (1)
+ xdcl_list: xdcl_list.xdcl ';'
+ xdcl: . (23)
+
+ $end reduce 1 (src line 122)
+ error shift 29
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 33
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 23 (src line 285)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 28
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ xfndcl goto 27
+ xdcl goto 25
+ expr_list goto 49
+ common_dcl goto 26
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 18
+ imports: imports import.';'
+
+ ';' shift 84
+ . error
+
+
+state 19
+ import: LIMPORT.import_stmt
+ import: LIMPORT.'(' import_stmt_list osemi ')'
+ import: LIMPORT.'(' ')'
+
+ LLITERAL shift 88
+ LNAME shift 10
+ '(' shift 86
+ '.' shift 90
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ import_here goto 87
+ sym goto 89
+ hidden_importsym goto 11
+ import_stmt goto 85
+
+state 20
+ package: LPACKAGE sym ';'. (3)
+
+ . reduce 3 (src line 138)
+
+
+state 21
+ hidden_importsym: '@' LLITERAL.'.' LNAME
+ hidden_importsym: '@' LLITERAL.'.' '?'
+
+ '.' shift 91
+ . error
+
+
+state 22
+ import_there: $$21 hidden_import_list.'$' '$'
+ hidden_import_list: hidden_import_list.hidden_import
+
+ LCONST shift 96
+ LFUNC shift 98
+ LIMPORT shift 94
+ LTYPE shift 97
+ LVAR shift 95
+ '$' shift 92
+ . error
+
+ hidden_import goto 93
+
+state 23
+ import_package: LPACKAGE LNAME import_safety.';'
+
+ ';' shift 99
+ . error
+
+
+state 24
+ import_safety: LNAME. (20)
+
+ . reduce 20 (src line 265)
+
+
+state 25
+ xdcl_list: xdcl_list xdcl.';'
+
+ ';' shift 100
+ . error
+
+
+state 26
+ xdcl: common_dcl. (24)
+
+ . reduce 24 (src line 290)
+
+
+state 27
+ xdcl: xfndcl. (25)
+
+ . reduce 25 (src line 291)
+
+
+state 28
+ xdcl: non_dcl_stmt. (26)
+
+ . reduce 26 (src line 295)
+
+
+state 29
+ xdcl: error. (27)
+
+ . reduce 27 (src line 300)
+
+
+state 30
+ common_dcl: LVAR.vardcl
+ common_dcl: LVAR.'(' vardcl_list osemi ')'
+ common_dcl: LVAR.'(' ')'
+
+ LNAME shift 10
+ '(' shift 102
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 103
+ vardcl goto 101
+ hidden_importsym goto 11
+
+state 31
+ common_dcl: lconst.constdcl
+ common_dcl: lconst.'(' constdcl osemi ')'
+ common_dcl: lconst.'(' constdcl ';' constdcl_list osemi ')'
+ common_dcl: lconst.'(' ')'
+
+ LNAME shift 10
+ '(' shift 107
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 108
+ constdcl goto 106
+ hidden_importsym goto 11
+
+state 32
+ common_dcl: LTYPE.typedcl
+ common_dcl: LTYPE.'(' typedcl_list osemi ')'
+ common_dcl: LTYPE.'(' ')'
+
+ LNAME shift 10
+ '(' shift 110
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 112
+ typedclname goto 111
+ typedcl goto 109
+ hidden_importsym goto 11
+
+state 33
+ xfndcl: LFUNC.fndcl fnbody
+ fntype: LFUNC.'(' oarg_type_list_ocomma ')' fnres
+
+ LNAME shift 10
+ '(' shift 114
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 115
+ fndcl goto 113
+ hidden_importsym goto 11
+
+state 34
+ non_dcl_stmt: simple_stmt. (256)
+
+ . reduce 256 (src line 1734)
+
+
+state 35
+ non_dcl_stmt: for_stmt. (257)
+
+ . reduce 257 (src line 1736)
+
+
+state 36
+ non_dcl_stmt: switch_stmt. (258)
+
+ . reduce 258 (src line 1737)
+
+
+state 37
+ non_dcl_stmt: select_stmt. (259)
+
+ . reduce 259 (src line 1738)
+
+
+state 38
+ non_dcl_stmt: if_stmt. (260)
+
+ . reduce 260 (src line 1739)
+
+
+state 39
+ non_dcl_stmt: labelname.':' $$261 stmt
+
+ ':' shift 116
+ . error
+
+
+state 40
+ non_dcl_stmt: LFALL. (263)
+
+ . reduce 263 (src line 1756)
+
+
+state 41
+ non_dcl_stmt: LBREAK.onew_name
+ onew_name: . (155)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 155 (src line 1107)
+
+ sym goto 119
+ new_name goto 118
+ onew_name goto 117
+ hidden_importsym goto 11
+
+state 42
+ non_dcl_stmt: LCONTINUE.onew_name
+ onew_name: . (155)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 155 (src line 1107)
+
+ sym goto 119
+ new_name goto 118
+ onew_name goto 120
+ hidden_importsym goto 11
+
+state 43
+ non_dcl_stmt: LGO.pseudocall
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ '(' shift 67
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 122
+ pexpr_no_paren goto 66
+ pseudocall goto 121
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 44
+ non_dcl_stmt: LDEFER.pseudocall
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ '(' shift 67
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 122
+ pexpr_no_paren goto 66
+ pseudocall goto 125
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 45
+ non_dcl_stmt: LGOTO.new_name
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 119
+ new_name goto 126
+ hidden_importsym goto 11
+
+state 46
+ non_dcl_stmt: LRETURN.oexpr_list
+ oexpr_list: . (292)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 292 (src line 1905)
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 128
+ oexpr_list goto 127
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 47
+ lconst: LCONST. (38)
+
+ . reduce 38 (src line 354)
+
+
+state 48
+ simple_stmt: expr. (49)
+ simple_stmt: expr.LASOP expr
+ simple_stmt: expr.LINC
+ simple_stmt: expr.LDEC
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ expr_list: expr. (276)
+
+ LASOP shift 130
+ LCOLAS reduce 276 (src line 1840)
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LDEC shift 132
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LINC shift 131
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ '=' reduce 276 (src line 1840)
+ ',' reduce 276 (src line 1840)
+ . reduce 49 (src line 410)
+
+
+state 49
+ simple_stmt: expr_list.'=' expr_list
+ simple_stmt: expr_list.LCOLAS expr_list
+ expr_list: expr_list.',' expr
+
+ LCOLAS shift 154
+ '=' shift 153
+ ',' shift 155
+ . error
+
+
+state 50
+ for_stmt: LFOR.$$74 for_body
+ $$74: . (74)
+
+ . reduce 74 (src line 659)
+
+ $$74 goto 156
+
+state 51
+ switch_stmt: LSWITCH.$$88 if_header $$89 LBODY caseblock_list '}'
+ $$88: . (88)
+
+ . reduce 88 (src line 754)
+
+ $$88 goto 157
+
+state 52
+ select_stmt: LSELECT.$$91 LBODY caseblock_list '}'
+ $$91: . (91)
+
+ . reduce 91 (src line 777)
+
+ $$91 goto 158
+
+state 53
+ if_stmt: LIF.$$78 if_header $$79 loop_body $$80 elseif_list else
+ $$78: . (78)
+
+ . reduce 78 (src line 688)
+
+ $$78 goto 159
+
+state 54
+ labelname: new_name. (163)
+
+ . reduce 163 (src line 1167)
+
+
+state 55
+ expr: uexpr. (93)
+
+ . reduce 93 (src line 793)
+
+
+state 56
+ new_name: sym. (153)
+ name: sym. (162)
+
+ ':' reduce 153 (src line 1091)
+ . reduce 162 (src line 1158)
+
+
+state 57
+ uexpr: pexpr. (114)
+ pseudocall: pexpr.'(' ')'
+ pseudocall: pexpr.'(' expr_or_type_list ocomma ')'
+ pseudocall: pexpr.'(' expr_or_type_list LDDD ocomma ')'
+ pexpr_no_paren: pexpr.'.' sym
+ pexpr_no_paren: pexpr.'.' '(' expr_or_type ')'
+ pexpr_no_paren: pexpr.'.' '(' LTYPE ')'
+ pexpr_no_paren: pexpr.'[' expr ']'
+ pexpr_no_paren: pexpr.'[' oexpr ':' oexpr ']'
+ pexpr_no_paren: pexpr.'[' oexpr ':' oexpr ':' oexpr ']'
+
+ '(' shift 160
+ '.' shift 161
+ '[' shift 162
+ . reduce 114 (src line 877)
+
+
+state 58
+ uexpr: '*'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 163
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 59
+ uexpr: '&'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 164
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 60
+ uexpr: '+'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 165
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 61
+ uexpr: '-'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 166
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 62
+ uexpr: '!'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 167
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 63
+ uexpr: '~'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 168
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 64
+ uexpr: '^'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 169
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 65
+ uexpr: LCOMM.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 170
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 66
+ pexpr_no_paren: pexpr_no_paren.'{' start_complit braced_keyval_list '}'
+ pexpr: pexpr_no_paren. (146)
+
+ '{' shift 171
+ . reduce 146 (src line 1054)
+
+
+state 67
+ pexpr_no_paren: '('.expr_or_type ')' '{' start_complit braced_keyval_list '}'
+ pexpr: '('.expr_or_type ')'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 172
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 68
+ pexpr_no_paren: LLITERAL. (126)
+
+ . reduce 126 (src line 941)
+
+
+state 69
+ pexpr_no_paren: name. (127)
+
+ . reduce 127 (src line 946)
+
+
+state 70
+ pexpr_no_paren: pseudocall. (134)
+
+ . reduce 134 (src line 984)
+
+
+state 71
+ pexpr_no_paren: convtype.'(' expr ocomma ')'
+
+ '(' shift 180
+ . error
+
+
+state 72
+ pexpr_no_paren: comptype.lbrace start_complit braced_keyval_list '}'
+
+ LBODY shift 182
+ '{' shift 183
+ . error
+
+ lbrace goto 181
+
+state 73
+ pexpr_no_paren: fnliteral. (139)
+
+ . reduce 139 (src line 1011)
+
+
+state 74
+ convtype: fntype. (181)
+ fnlitdcl: fntype. (215)
+
+ '(' reduce 181 (src line 1220)
+ . reduce 215 (src line 1484)
+
+
+state 75
+ convtype: othertype. (182)
+ comptype: othertype. (183)
+
+ '(' reduce 182 (src line 1222)
+ . reduce 183 (src line 1224)
+
+
+state 76
+ fnliteral: fnlitdcl.lbrace stmt_list '}'
+ fnliteral: fnlitdcl.error
+
+ error shift 185
+ LBODY shift 182
+ '{' shift 183
+ . error
+
+ lbrace goto 184
+
+state 77
+ othertype: '['.oexpr ']' ntype
+ othertype: '['.LDDD ']' ntype
+ oexpr: . (290)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LDDD shift 187
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 290 (src line 1899)
+
+ sym goto 123
+ expr goto 188
+ fnliteral goto 73
+ name goto 69
+ oexpr goto 186
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 78
+ othertype: LCHAN.non_recvchantype
+ othertype: LCHAN.LCOMM ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 190
+ '*' shift 196
+ '(' shift 195
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ dotname goto 194
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 193
+ non_recvchantype goto 189
+ othertype goto 192
+ fntype goto 191
+ hidden_importsym goto 11
+
+state 79
+ othertype: LMAP.'[' ntype ']' ntype
+
+ '[' shift 198
+ . error
+
+
+state 80
+ othertype: structtype. (196)
+
+ . reduce 196 (src line 1272)
+
+
+state 81
+ othertype: interfacetype. (197)
+
+ . reduce 197 (src line 1273)
+
+
+state 82
+ structtype: LSTRUCT.lbrace structdcl_list osemi '}'
+ structtype: LSTRUCT.lbrace '}'
+
+ LBODY shift 182
+ '{' shift 183
+ . error
+
+ lbrace goto 199
+
+state 83
+ interfacetype: LINTERFACE.lbrace interfacedcl_list osemi '}'
+ interfacetype: LINTERFACE.lbrace '}'
+
+ LBODY shift 182
+ '{' shift 183
+ . error
+
+ lbrace goto 200
+
+state 84
+ imports: imports import ';'. (7)
+
+ . reduce 7 (src line 166)
+
+
+state 85
+ import: LIMPORT import_stmt. (8)
+
+ . reduce 8 (src line 168)
+
+
+state 86
+ import: LIMPORT '('.import_stmt_list osemi ')'
+ import: LIMPORT '('.')'
+
+ LLITERAL shift 88
+ LNAME shift 10
+ ')' shift 202
+ '.' shift 90
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ import_here goto 87
+ sym goto 89
+ hidden_importsym goto 11
+ import_stmt goto 203
+ import_stmt_list goto 201
+
+state 87
+ import_stmt: import_here.import_package import_there
+ import_stmt: import_here.import_there
+ $$21: . (21)
+
+ LPACKAGE shift 7
+ . reduce 21 (src line 272)
+
+ import_package goto 204
+ import_there goto 205
+ $$21 goto 15
+
+state 88
+ import_here: LLITERAL. (15)
+
+ . reduce 15 (src line 224)
+
+
+state 89
+ import_here: sym.LLITERAL
+
+ LLITERAL shift 206
+ . error
+
+
+state 90
+ import_here: '.'.LLITERAL
+
+ LLITERAL shift 207
+ . error
+
+
+state 91
+ hidden_importsym: '@' LLITERAL '.'.LNAME
+ hidden_importsym: '@' LLITERAL '.'.'?'
+
+ LNAME shift 208
+ '?' shift 209
+ . error
+
+
+state 92
+ import_there: $$21 hidden_import_list '$'.'$'
+
+ '$' shift 210
+ . error
+
+
+state 93
+ hidden_import_list: hidden_import_list hidden_import. (345)
+
+ . reduce 345 (src line 2210)
+
+
+state 94
+ hidden_import: LIMPORT.LNAME LLITERAL ';'
+
+ LNAME shift 211
+ . error
+
+
+state 95
+ hidden_import: LVAR.hidden_pkg_importsym hidden_type ';'
+
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 213
+ hidden_pkg_importsym goto 212
+
+state 96
+ hidden_import: LCONST.hidden_pkg_importsym '=' hidden_constant ';'
+ hidden_import: LCONST.hidden_pkg_importsym hidden_type '=' hidden_constant ';'
+
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 213
+ hidden_pkg_importsym goto 214
+
+state 97
+ hidden_import: LTYPE.hidden_pkgtype hidden_type ';'
+
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 213
+ hidden_pkg_importsym goto 216
+ hidden_pkgtype goto 215
+
+state 98
+ hidden_import: LFUNC.hidden_fndcl fnbody ';'
+
+ '(' shift 219
+ '@' shift 13
+ . error
+
+ hidden_fndcl goto 217
+ hidden_importsym goto 213
+ hidden_pkg_importsym goto 218
+
+state 99
+ import_package: LPACKAGE LNAME import_safety ';'. (18)
+
+ . reduce 18 (src line 247)
+
+
+state 100
+ xdcl_list: xdcl_list xdcl ';'. (219)
+
+ . reduce 219 (src line 1511)
+
+
+state 101
+ common_dcl: LVAR vardcl. (28)
+
+ . reduce 28 (src line 305)
+
+
+state 102
+ common_dcl: LVAR '('.vardcl_list osemi ')'
+ common_dcl: LVAR '('.')'
+
+ LNAME shift 10
+ ')' shift 221
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 103
+ vardcl goto 222
+ vardcl_list goto 220
+ hidden_importsym goto 11
+
+state 103
+ vardcl: dcl_name_list.ntype
+ vardcl: dcl_name_list.ntype '=' expr_list
+ vardcl: dcl_name_list.'=' expr_list
+ dcl_name_list: dcl_name_list.',' dcl_name
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '=' shift 224
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ ',' shift 225
+ . error
+
+ sym goto 123
+ ntype goto 223
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 104
+ dcl_name_list: dcl_name. (274)
+
+ . reduce 274 (src line 1830)
+
+
+state 105
+ dcl_name: sym. (154)
+
+ . reduce 154 (src line 1101)
+
+
+state 106
+ common_dcl: lconst constdcl. (31)
+
+ . reduce 31 (src line 318)
+
+
+state 107
+ common_dcl: lconst '('.constdcl osemi ')'
+ common_dcl: lconst '('.constdcl ';' constdcl_list osemi ')'
+ common_dcl: lconst '('.')'
+
+ LNAME shift 10
+ ')' shift 234
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 108
+ constdcl goto 233
+ hidden_importsym goto 11
+
+state 108
+ constdcl: dcl_name_list.ntype '=' expr_list
+ constdcl: dcl_name_list.'=' expr_list
+ dcl_name_list: dcl_name_list.',' dcl_name
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '=' shift 236
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ ',' shift 225
+ . error
+
+ sym goto 123
+ ntype goto 235
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 109
+ common_dcl: LTYPE typedcl. (35)
+
+ . reduce 35 (src line 341)
+
+
+state 110
+ common_dcl: LTYPE '('.typedcl_list osemi ')'
+ common_dcl: LTYPE '('.')'
+
+ LNAME shift 10
+ ')' shift 238
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 112
+ typedclname goto 111
+ typedcl goto 239
+ typedcl_list goto 237
+ hidden_importsym goto 11
+
+state 111
+ typedcl: typedclname.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 240
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 112
+ typedclname: sym. (47)
+
+ . reduce 47 (src line 395)
+
+
+state 113
+ xfndcl: LFUNC fndcl.fnbody
+ fnbody: . (210)
+
+ '{' shift 242
+ . reduce 210 (src line 1457)
+
+ fnbody goto 241
+
+state 114
+ fndcl: '('.oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')' fnres
+ fntype: LFUNC '('.oarg_type_list_ocomma ')' fnres
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 243
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 115
+ fndcl: sym.'(' oarg_type_list_ocomma ')' fnres
+
+ '(' shift 251
+ . error
+
+
+state 116
+ non_dcl_stmt: labelname ':'.$$261 stmt
+ $$261: . (261)
+
+ . reduce 261 (src line 1740)
+
+ $$261 goto 252
+
+state 117
+ non_dcl_stmt: LBREAK onew_name. (264)
+
+ . reduce 264 (src line 1762)
+
+
+state 118
+ onew_name: new_name. (156)
+
+ . reduce 156 (src line 1111)
+
+
+state 119
+ new_name: sym. (153)
+
+ . reduce 153 (src line 1091)
+
+
+state 120
+ non_dcl_stmt: LCONTINUE onew_name. (265)
+
+ . reduce 265 (src line 1766)
+
+
+state 121
+ pexpr_no_paren: pseudocall. (134)
+ non_dcl_stmt: LGO pseudocall. (266)
+
+ '(' reduce 134 (src line 984)
+ '.' reduce 134 (src line 984)
+ '{' reduce 134 (src line 984)
+ '[' reduce 134 (src line 984)
+ . reduce 266 (src line 1770)
+
+
+state 122
+ pseudocall: pexpr.'(' ')'
+ pseudocall: pexpr.'(' expr_or_type_list ocomma ')'
+ pseudocall: pexpr.'(' expr_or_type_list LDDD ocomma ')'
+ pexpr_no_paren: pexpr.'.' sym
+ pexpr_no_paren: pexpr.'.' '(' expr_or_type ')'
+ pexpr_no_paren: pexpr.'.' '(' LTYPE ')'
+ pexpr_no_paren: pexpr.'[' expr ']'
+ pexpr_no_paren: pexpr.'[' oexpr ':' oexpr ']'
+ pexpr_no_paren: pexpr.'[' oexpr ':' oexpr ':' oexpr ']'
+
+ '(' shift 160
+ '.' shift 161
+ '[' shift 162
+ . error
+
+
+state 123
+ name: sym. (162)
+
+ . reduce 162 (src line 1158)
+
+
+state 124
+ fntype: LFUNC.'(' oarg_type_list_ocomma ')' fnres
+
+ '(' shift 253
+ . error
+
+
+state 125
+ pexpr_no_paren: pseudocall. (134)
+ non_dcl_stmt: LDEFER pseudocall. (267)
+
+ '(' reduce 134 (src line 984)
+ '.' reduce 134 (src line 984)
+ '{' reduce 134 (src line 984)
+ '[' reduce 134 (src line 984)
+ . reduce 267 (src line 1774)
+
+
+state 126
+ non_dcl_stmt: LGOTO new_name. (268)
+
+ . reduce 268 (src line 1778)
+
+
+state 127
+ non_dcl_stmt: LRETURN oexpr_list. (269)
+
+ . reduce 269 (src line 1783)
+
+
+state 128
+ expr_list: expr_list.',' expr
+ oexpr_list: expr_list. (293)
+
+ ',' shift 155
+ . reduce 293 (src line 1909)
+
+
+state 129
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ expr_list: expr. (276)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 276 (src line 1840)
+
+
+state 130
+ simple_stmt: expr LASOP.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 254
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 131
+ simple_stmt: expr LINC. (53)
+
+ . reduce 53 (src line 460)
+
+
+state 132
+ simple_stmt: expr LDEC. (54)
+
+ . reduce 54 (src line 466)
+
+
+state 133
+ expr: expr LOROR.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 255
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 134
+ expr: expr LANDAND.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 256
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 135
+ expr: expr LEQ.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 257
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 136
+ expr: expr LNE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 258
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 137
+ expr: expr LLT.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 259
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 138
+ expr: expr LLE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 260
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 139
+ expr: expr LGE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 261
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 140
+ expr: expr LGT.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 262
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 141
+ expr: expr '+'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 263
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 142
+ expr: expr '-'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 264
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 143
+ expr: expr '|'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 265
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 144
+ expr: expr '^'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 266
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 145
+ expr: expr '*'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 267
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 146
+ expr: expr '/'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 268
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 147
+ expr: expr '%'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 269
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 148
+ expr: expr '&'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 270
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 149
+ expr: expr LANDNOT.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 271
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 150
+ expr: expr LLSH.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 272
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 151
+ expr: expr LRSH.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 273
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 152
+ expr: expr LCOMM.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 274
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 153
+ simple_stmt: expr_list '='.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 275
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 154
+ simple_stmt: expr_list LCOLAS.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 276
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 155
+ expr_list: expr_list ','.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 277
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 156
+ for_stmt: LFOR $$74.for_body
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRANGE shift 284
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ for_body goto 278
+ for_header goto 279
+ name goto 69
+ osimple_stmt goto 280
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ range_stmt goto 281
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 283
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 157
+ switch_stmt: LSWITCH $$88.if_header $$89 LBODY caseblock_list '}'
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ if_header goto 285
+ name goto 69
+ osimple_stmt goto 286
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 158
+ select_stmt: LSELECT $$91.LBODY caseblock_list '}'
+
+ LBODY shift 287
+ . error
+
+
+state 159
+ if_stmt: LIF $$78.if_header $$79 loop_body $$80 elseif_list else
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ if_header goto 288
+ name goto 69
+ osimple_stmt goto 286
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 160
+ pseudocall: pexpr '('.')'
+ pseudocall: pexpr '('.expr_or_type_list ocomma ')'
+ pseudocall: pexpr '('.expr_or_type_list LDDD ocomma ')'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ ')' shift 289
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 291
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_or_type_list goto 290
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 161
+ pexpr_no_paren: pexpr '.'.sym
+ pexpr_no_paren: pexpr '.'.'(' expr_or_type ')'
+ pexpr_no_paren: pexpr '.'.'(' LTYPE ')'
+
+ LNAME shift 10
+ '(' shift 293
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 292
+ hidden_importsym goto 11
+
+state 162
+ pexpr_no_paren: pexpr '['.expr ']'
+ pexpr_no_paren: pexpr '['.oexpr ':' oexpr ']'
+ pexpr_no_paren: pexpr '['.oexpr ':' oexpr ':' oexpr ']'
+ oexpr: . (290)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 290 (src line 1899)
+
+ sym goto 123
+ expr goto 294
+ fnliteral goto 73
+ name goto 69
+ oexpr goto 295
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 163
+ uexpr: '*' uexpr. (115)
+
+ . reduce 115 (src line 879)
+
+
+state 164
+ uexpr: '&' uexpr. (116)
+
+ . reduce 116 (src line 883)
+
+
+state 165
+ uexpr: '+' uexpr. (117)
+
+ . reduce 117 (src line 894)
+
+
+state 166
+ uexpr: '-' uexpr. (118)
+
+ . reduce 118 (src line 898)
+
+
+state 167
+ uexpr: '!' uexpr. (119)
+
+ . reduce 119 (src line 902)
+
+
+state 168
+ uexpr: '~' uexpr. (120)
+
+ . reduce 120 (src line 906)
+
+
+state 169
+ uexpr: '^' uexpr. (121)
+
+ . reduce 121 (src line 911)
+
+
+state 170
+ uexpr: LCOMM uexpr. (122)
+
+ . reduce 122 (src line 915)
+
+
+state 171
+ pexpr_no_paren: pexpr_no_paren '{'.start_complit braced_keyval_list '}'
+ start_complit: . (140)
+
+ . reduce 140 (src line 1013)
+
+ start_complit goto 296
+
+state 172
+ pexpr_no_paren: '(' expr_or_type.')' '{' start_complit braced_keyval_list '}'
+ pexpr: '(' expr_or_type.')'
+
+ ')' shift 297
+ . error
+
+
+state 173
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ expr_or_type: expr. (148)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 148 (src line 1069)
+
+
+state 174
+ expr_or_type: non_expr_type. (149)
+
+ . reduce 149 (src line 1071)
+
+
+state 175
+ non_expr_type: recvchantype. (172)
+
+ . reduce 172 (src line 1201)
+
+
+state 176
+ non_expr_type: fntype. (173)
+ convtype: fntype. (181)
+ fnlitdcl: fntype. (215)
+
+ error reduce 215 (src line 1484)
+ LBODY reduce 215 (src line 1484)
+ '(' reduce 181 (src line 1220)
+ '{' reduce 215 (src line 1484)
+ . reduce 173 (src line 1203)
+
+
+state 177
+ non_expr_type: othertype. (174)
+ convtype: othertype. (182)
+ comptype: othertype. (183)
+
+ LBODY reduce 183 (src line 1224)
+ '(' reduce 182 (src line 1222)
+ '{' reduce 183 (src line 1224)
+ . reduce 174 (src line 1204)
+
+
+state 178
+ uexpr: '*'.uexpr
+ non_expr_type: '*'.non_expr_type
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 298
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 163
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 179
+ uexpr: LCOMM.uexpr
+ recvchantype: LCOMM.LCHAN ntype
+
+ LLITERAL shift 68
+ LCHAN shift 299
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 170
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 180
+ pexpr_no_paren: convtype '('.expr ocomma ')'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 300
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 181
+ pexpr_no_paren: comptype lbrace.start_complit braced_keyval_list '}'
+ start_complit: . (140)
+
+ . reduce 140 (src line 1013)
+
+ start_complit goto 301
+
+state 182
+ lbrace: LBODY. (151)
+
+ . reduce 151 (src line 1076)
+
+
+state 183
+ lbrace: '{'. (152)
+
+ . reduce 152 (src line 1081)
+
+
+state 184
+ fnliteral: fnlitdcl lbrace.stmt_list '}'
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 303
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ stmt_list goto 302
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 185
+ fnliteral: fnlitdcl error. (217)
+
+ . reduce 217 (src line 1496)
+
+
+state 186
+ othertype: '[' oexpr.']' ntype
+
+ ']' shift 309
+ . error
+
+
+state 187
+ othertype: '[' LDDD.']' ntype
+
+ ']' shift 310
+ . error
+
+
+state 188
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ oexpr: expr. (291)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 291 (src line 1903)
+
+
+state 189
+ othertype: LCHAN non_recvchantype. (193)
+
+ . reduce 193 (src line 1258)
+
+
+state 190
+ othertype: LCHAN LCOMM.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 311
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 191
+ non_recvchantype: fntype. (176)
+
+ . reduce 176 (src line 1210)
+
+
+state 192
+ non_recvchantype: othertype. (177)
+
+ . reduce 177 (src line 1212)
+
+
+state 193
+ non_recvchantype: ptrtype. (178)
+
+ . reduce 178 (src line 1213)
+
+
+state 194
+ non_recvchantype: dotname. (179)
+
+ . reduce 179 (src line 1214)
+
+
+state 195
+ non_recvchantype: '('.ntype ')'
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 312
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 196
+ ptrtype: '*'.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 313
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 197
+ dotname: name. (189)
+ dotname: name.'.' sym
+
+ '.' shift 314
+ . reduce 189 (src line 1234)
+
+
+state 198
+ othertype: LMAP '['.ntype ']' ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 315
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 199
+ structtype: LSTRUCT lbrace.structdcl_list osemi '}'
+ structtype: LSTRUCT lbrace.'}'
+
+ LNAME shift 325
+ '*' shift 322
+ '(' shift 321
+ '}' shift 317
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 119
+ packname goto 324
+ embed goto 320
+ new_name goto 323
+ new_name_list goto 319
+ structdcl goto 318
+ structdcl_list goto 316
+ hidden_importsym goto 11
+
+state 200
+ interfacetype: LINTERFACE lbrace.interfacedcl_list osemi '}'
+ interfacetype: LINTERFACE lbrace.'}'
+
+ LNAME shift 325
+ '(' shift 331
+ '}' shift 327
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 119
+ packname goto 330
+ interfacedcl goto 328
+ new_name goto 329
+ interfacedcl_list goto 326
+ hidden_importsym goto 11
+
+state 201
+ import: LIMPORT '(' import_stmt_list.osemi ')'
+ import_stmt_list: import_stmt_list.';' import_stmt
+ osemi: . (286)
+
+ ';' shift 333
+ . reduce 286 (src line 1893)
+
+ osemi goto 332
+
+state 202
+ import: LIMPORT '(' ')'. (10)
+
+ . reduce 10 (src line 171)
+
+
+state 203
+ import_stmt_list: import_stmt. (13)
+
+ . reduce 13 (src line 220)
+
+
+state 204
+ import_stmt: import_here import_package.import_there
+ $$21: . (21)
+
+ . reduce 21 (src line 272)
+
+ import_there goto 334
+ $$21 goto 15
+
+state 205
+ import_stmt: import_here import_there. (12)
+
+ . reduce 12 (src line 209)
+
+
+state 206
+ import_here: sym LLITERAL. (16)
+
+ . reduce 16 (src line 232)
+
+
+state 207
+ import_here: '.' LLITERAL. (17)
+
+ . reduce 17 (src line 239)
+
+
+state 208
+ hidden_importsym: '@' LLITERAL '.' LNAME. (160)
+
+ . reduce 160 (src line 1128)
+
+
+state 209
+ hidden_importsym: '@' LLITERAL '.' '?'. (161)
+
+ . reduce 161 (src line 1143)
+
+
+state 210
+ import_there: $$21 hidden_import_list '$' '$'. (22)
+
+ . reduce 22 (src line 276)
+
+
+state 211
+ hidden_import: LIMPORT LNAME.LLITERAL ';'
+
+ LLITERAL shift 335
+ . error
+
+
+state 212
+ hidden_import: LVAR hidden_pkg_importsym.hidden_type ';'
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 336
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 213
+ hidden_pkg_importsym: hidden_importsym. (310)
+
+ . reduce 310 (src line 1985)
+
+
+state 214
+ hidden_import: LCONST hidden_pkg_importsym.'=' hidden_constant ';'
+ hidden_import: LCONST hidden_pkg_importsym.hidden_type '=' hidden_constant ';'
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '=' shift 350
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 351
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 215
+ hidden_import: LTYPE hidden_pkgtype.hidden_type ';'
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 352
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 216
+ hidden_pkgtype: hidden_pkg_importsym. (311)
+
+ . reduce 311 (src line 1992)
+
+
+state 217
+ hidden_import: LFUNC hidden_fndcl.fnbody ';'
+ fnbody: . (210)
+
+ '{' shift 242
+ . reduce 210 (src line 1457)
+
+ fnbody goto 353
+
+state 218
+ hidden_fndcl: hidden_pkg_importsym.'(' ohidden_funarg_list ')' ohidden_funres
+
+ '(' shift 354
+ . error
+
+
+state 219
+ hidden_fndcl: '('.hidden_funarg_list ')' sym '(' ohidden_funarg_list ')' ohidden_funres
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 355
+
+state 220
+ common_dcl: LVAR '(' vardcl_list.osemi ')'
+ vardcl_list: vardcl_list.';' vardcl
+ osemi: . (286)
+
+ ';' shift 359
+ . reduce 286 (src line 1893)
+
+ osemi goto 358
+
+state 221
+ common_dcl: LVAR '(' ')'. (30)
+
+ . reduce 30 (src line 314)
+
+
+state 222
+ vardcl_list: vardcl. (220)
+
+ . reduce 220 (src line 1523)
+
+
+state 223
+ vardcl: dcl_name_list ntype. (39)
+ vardcl: dcl_name_list ntype.'=' expr_list
+
+ '=' shift 360
+ . reduce 39 (src line 360)
+
+
+state 224
+ vardcl: dcl_name_list '='.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 361
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 225
+ dcl_name_list: dcl_name_list ','.dcl_name
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 105
+ dcl_name goto 362
+ hidden_importsym goto 11
+
+state 226
+ ntype: recvchantype. (166)
+
+ . reduce 166 (src line 1190)
+
+
+state 227
+ ntype: fntype. (167)
+
+ . reduce 167 (src line 1192)
+
+
+state 228
+ ntype: othertype. (168)
+
+ . reduce 168 (src line 1193)
+
+
+state 229
+ ntype: ptrtype. (169)
+
+ . reduce 169 (src line 1194)
+
+
+state 230
+ ntype: dotname. (170)
+
+ . reduce 170 (src line 1195)
+
+
+state 231
+ ntype: '('.ntype ')'
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 363
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 232
+ recvchantype: LCOMM.LCHAN ntype
+
+ LCHAN shift 364
+ . error
+
+
+state 233
+ common_dcl: lconst '(' constdcl.osemi ')'
+ common_dcl: lconst '(' constdcl.';' constdcl_list osemi ')'
+ osemi: . (286)
+
+ ';' shift 366
+ . reduce 286 (src line 1893)
+
+ osemi goto 365
+
+state 234
+ common_dcl: lconst '(' ')'. (34)
+
+ . reduce 34 (src line 336)
+
+
+state 235
+ constdcl: dcl_name_list ntype.'=' expr_list
+
+ '=' shift 367
+ . error
+
+
+state 236
+ constdcl: dcl_name_list '='.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 368
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 237
+ common_dcl: LTYPE '(' typedcl_list.osemi ')'
+ typedcl_list: typedcl_list.';' typedcl
+ osemi: . (286)
+
+ ';' shift 370
+ . reduce 286 (src line 1893)
+
+ osemi goto 369
+
+state 238
+ common_dcl: LTYPE '(' ')'. (37)
+
+ . reduce 37 (src line 349)
+
+
+state 239
+ typedcl_list: typedcl. (224)
+
+ . reduce 224 (src line 1537)
+
+
+state 240
+ typedcl: typedclname ntype. (48)
+
+ . reduce 48 (src line 404)
+
+
+state 241
+ xfndcl: LFUNC fndcl fnbody. (204)
+
+ . reduce 204 (src line 1318)
+
+
+state 242
+ fnbody: '{'.stmt_list '}'
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 303
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ stmt_list goto 371
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 243
+ fndcl: '(' oarg_type_list_ocomma.')' sym '(' oarg_type_list_ocomma ')' fnres
+ fntype: LFUNC '(' oarg_type_list_ocomma.')' fnres
+
+ ')' shift 372
+ . error
+
+
+state 244
+ arg_type_list: arg_type_list.',' arg_type
+ oarg_type_list_ocomma: arg_type_list.ocomma
+ ocomma: . (288)
+
+ ',' shift 373
+ . reduce 288 (src line 1896)
+
+ ocomma goto 374
+
+state 245
+ arg_type_list: arg_type. (247)
+
+ . reduce 247 (src line 1697)
+
+
+state 246
+ arg_type: name_or_type. (243)
+
+ . reduce 243 (src line 1681)
+
+
+state 247
+ name: sym. (162)
+ arg_type: sym.name_or_type
+ arg_type: sym.dotdotdot
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 162 (src line 1158)
+
+ sym goto 123
+ ntype goto 249
+ dotname goto 230
+ name goto 197
+ name_or_type goto 375
+ dotdotdot goto 376
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 248
+ arg_type: dotdotdot. (246)
+
+ . reduce 246 (src line 1695)
+
+
+state 249
+ name_or_type: ntype. (150)
+
+ . reduce 150 (src line 1073)
+
+
+state 250
+ dotdotdot: LDDD. (164)
+ dotdotdot: LDDD.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 164 (src line 1179)
+
+ sym goto 123
+ ntype goto 377
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 251
+ fndcl: sym '('.oarg_type_list_ocomma ')' fnres
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 378
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 252
+ non_dcl_stmt: labelname ':' $$261.stmt
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCASE reduce 251 (src line 1719)
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFAULT reduce 251 (src line 1719)
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 379
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 253
+ fntype: LFUNC '('.oarg_type_list_ocomma ')' fnres
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 380
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 254
+ simple_stmt: expr LASOP expr. (50)
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 50 (src line 425)
+
+
+state 255
+ expr: expr.LOROR expr
+ expr: expr LOROR expr. (94)
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 94 (src line 795)
+
+
+state 256
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr LANDAND expr. (95)
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 95 (src line 799)
+
+
+state 257
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr LEQ expr. (96)
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 96 (src line 803)
+
+
+state 258
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr LNE expr. (97)
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 97 (src line 807)
+
+
+state 259
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr LLT expr. (98)
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 98 (src line 811)
+
+
+state 260
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr LLE expr. (99)
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 99 (src line 815)
+
+
+state 261
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr LGE expr. (100)
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 100 (src line 819)
+
+
+state 262
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr LGT expr. (101)
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 101 (src line 823)
+
+
+state 263
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr '+' expr. (102)
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 102 (src line 827)
+
+
+state 264
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr '-' expr. (103)
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 103 (src line 831)
+
+
+state 265
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr '|' expr. (104)
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 104 (src line 835)
+
+
+state 266
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr '^' expr. (105)
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 105 (src line 839)
+
+
+state 267
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr '*' expr. (106)
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 106 (src line 843)
+
+
+state 268
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr '/' expr. (107)
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 107 (src line 847)
+
+
+state 269
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr '%' expr. (108)
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 108 (src line 851)
+
+
+state 270
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr '&' expr. (109)
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 109 (src line 855)
+
+
+state 271
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr LANDNOT expr. (110)
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 110 (src line 859)
+
+
+state 272
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr LLSH expr. (111)
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 111 (src line 863)
+
+
+state 273
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr LRSH expr. (112)
+ expr: expr.LCOMM expr
+
+ . reduce 112 (src line 867)
+
+
+state 274
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ expr: expr LCOMM expr. (113)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 113 (src line 872)
+
+
+state 275
+ simple_stmt: expr_list '=' expr_list. (51)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 51 (src line 430)
+
+
+state 276
+ simple_stmt: expr_list LCOLAS expr_list. (52)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 52 (src line 442)
+
+
+state 277
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ expr_list: expr_list ',' expr. (277)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 277 (src line 1845)
+
+
+state 278
+ for_stmt: LFOR $$74 for_body. (75)
+
+ . reduce 75 (src line 664)
+
+
+state 279
+ for_body: for_header.loop_body
+
+ LBODY shift 382
+ . error
+
+ loop_body goto 381
+
+state 280
+ for_header: osimple_stmt.';' osimple_stmt ';' osimple_stmt
+ for_header: osimple_stmt. (71)
+
+ ';' shift 383
+ . reduce 71 (src line 644)
+
+
+state 281
+ for_header: range_stmt. (72)
+
+ . reduce 72 (src line 650)
+
+
+state 282
+ osimple_stmt: simple_stmt. (295)
+
+ . reduce 295 (src line 1915)
+
+
+state 283
+ simple_stmt: expr_list.'=' expr_list
+ simple_stmt: expr_list.LCOLAS expr_list
+ range_stmt: expr_list.'=' LRANGE expr
+ range_stmt: expr_list.LCOLAS LRANGE expr
+ expr_list: expr_list.',' expr
+
+ LCOLAS shift 385
+ '=' shift 384
+ ',' shift 155
+ . error
+
+
+state 284
+ range_stmt: LRANGE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 386
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 285
+ switch_stmt: LSWITCH $$88 if_header.$$89 LBODY caseblock_list '}'
+ $$89: . (89)
+
+ . reduce 89 (src line 759)
+
+ $$89 goto 387
+
+state 286
+ if_header: osimple_stmt. (76)
+ if_header: osimple_stmt.';' osimple_stmt
+
+ ';' shift 388
+ . reduce 76 (src line 670)
+
+
+state 287
+ select_stmt: LSELECT $$91 LBODY.caseblock_list '}'
+ caseblock_list: . (63)
+
+ . reduce 63 (src line 590)
+
+ caseblock_list goto 389
+
+state 288
+ if_stmt: LIF $$78 if_header.$$79 loop_body $$80 elseif_list else
+ $$79: . (79)
+
+ . reduce 79 (src line 693)
+
+ $$79 goto 390
+
+state 289
+ pseudocall: pexpr '(' ')'. (123)
+
+ . reduce 123 (src line 924)
+
+
+state 290
+ pseudocall: pexpr '(' expr_or_type_list.ocomma ')'
+ pseudocall: pexpr '(' expr_or_type_list.LDDD ocomma ')'
+ expr_or_type_list: expr_or_type_list.',' expr_or_type
+ ocomma: . (288)
+
+ LDDD shift 392
+ ',' shift 393
+ . reduce 288 (src line 1896)
+
+ ocomma goto 391
+
+state 291
+ expr_or_type_list: expr_or_type. (278)
+
+ . reduce 278 (src line 1850)
+
+
+state 292
+ pexpr_no_paren: pexpr '.' sym. (128)
+
+ . reduce 128 (src line 947)
+
+
+state 293
+ pexpr_no_paren: pexpr '.' '('.expr_or_type ')'
+ pexpr_no_paren: pexpr '.' '('.LTYPE ')'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LTYPE shift 395
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 394
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 294
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ pexpr_no_paren: pexpr '[' expr.']'
+ oexpr: expr. (291)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ ']' shift 396
+ . reduce 291 (src line 1903)
+
+
+state 295
+ pexpr_no_paren: pexpr '[' oexpr.':' oexpr ']'
+ pexpr_no_paren: pexpr '[' oexpr.':' oexpr ':' oexpr ']'
+
+ ':' shift 397
+ . error
+
+
+state 296
+ pexpr_no_paren: pexpr_no_paren '{' start_complit.braced_keyval_list '}'
+ braced_keyval_list: . (284)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 284 (src line 1881)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 401
+ fnliteral goto 73
+ keyval goto 400
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ keyval_list goto 399
+ braced_keyval_list goto 398
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 297
+ pexpr_no_paren: '(' expr_or_type ')'.'{' start_complit braced_keyval_list '}'
+ pexpr: '(' expr_or_type ')'. (147)
+
+ '{' shift 404
+ . reduce 147 (src line 1056)
+
+
+state 298
+ non_expr_type: '*' non_expr_type. (175)
+
+ . reduce 175 (src line 1205)
+
+
+state 299
+ othertype: LCHAN.non_recvchantype
+ othertype: LCHAN.LCOMM ntype
+ recvchantype: LCOMM LCHAN.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 405
+ '*' shift 196
+ '(' shift 411
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 406
+ dotname goto 410
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 409
+ recvchantype goto 226
+ non_recvchantype goto 189
+ othertype goto 408
+ fntype goto 407
+ hidden_importsym goto 11
+
+state 300
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ pexpr_no_paren: convtype '(' expr.ocomma ')'
+ ocomma: . (288)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ ',' shift 413
+ . reduce 288 (src line 1896)
+
+ ocomma goto 412
+
+state 301
+ pexpr_no_paren: comptype lbrace start_complit.braced_keyval_list '}'
+ braced_keyval_list: . (284)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 284 (src line 1881)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 401
+ fnliteral goto 73
+ keyval goto 400
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ keyval_list goto 399
+ braced_keyval_list goto 414
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 302
+ fnliteral: fnlitdcl lbrace stmt_list.'}'
+ stmt_list: stmt_list.';' stmt
+
+ ';' shift 416
+ '}' shift 415
+ . error
+
+
+state 303
+ stmt_list: stmt. (270)
+
+ . reduce 270 (src line 1804)
+
+
+state 304
+ stmt: compound_stmt. (252)
+
+ . reduce 252 (src line 1723)
+
+
+state 305
+ stmt: common_dcl. (253)
+
+ . reduce 253 (src line 1724)
+
+
+state 306
+ stmt: non_dcl_stmt. (254)
+
+ . reduce 254 (src line 1728)
+
+
+state 307
+ stmt: error. (255)
+
+ . reduce 255 (src line 1729)
+
+
+state 308
+ compound_stmt: '{'.$$59 stmt_list '}'
+ $$59: . (59)
+
+ . reduce 59 (src line 544)
+
+ $$59 goto 417
+
+state 309
+ othertype: '[' oexpr ']'.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 418
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 310
+ othertype: '[' LDDD ']'.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 419
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 311
+ othertype: LCHAN LCOMM ntype. (194)
+
+ . reduce 194 (src line 1263)
+
+
+state 312
+ non_recvchantype: '(' ntype.')'
+
+ ')' shift 420
+ . error
+
+
+state 313
+ ptrtype: '*' ntype. (198)
+
+ . reduce 198 (src line 1275)
+
+
+state 314
+ dotname: name '.'.sym
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 421
+ hidden_importsym goto 11
+
+state 315
+ othertype: LMAP '[' ntype.']' ntype
+
+ ']' shift 422
+ . error
+
+
+state 316
+ structtype: LSTRUCT lbrace structdcl_list.osemi '}'
+ structdcl_list: structdcl_list.';' structdcl
+ osemi: . (286)
+
+ ';' shift 424
+ . reduce 286 (src line 1893)
+
+ osemi goto 423
+
+state 317
+ structtype: LSTRUCT lbrace '}'. (201)
+
+ . reduce 201 (src line 1295)
+
+
+state 318
+ structdcl_list: structdcl. (226)
+
+ . reduce 226 (src line 1547)
+
+
+state 319
+ structdcl: new_name_list.ntype oliteral
+ new_name_list: new_name_list.',' new_name
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ ',' shift 426
+ . error
+
+ sym goto 123
+ ntype goto 425
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 320
+ structdcl: embed.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 427
+
+state 321
+ structdcl: '('.embed ')' oliteral
+ structdcl: '('.'*' embed ')' oliteral
+
+ LNAME shift 431
+ '*' shift 430
+ . error
+
+ packname goto 324
+ embed goto 429
+
+state 322
+ structdcl: '*'.embed oliteral
+ structdcl: '*'.'(' embed ')' oliteral
+
+ LNAME shift 431
+ '(' shift 433
+ . error
+
+ packname goto 324
+ embed goto 432
+
+state 323
+ new_name_list: new_name. (272)
+
+ . reduce 272 (src line 1820)
+
+
+state 324
+ embed: packname. (238)
+
+ . reduce 238 (src line 1646)
+
+
+state 325
+ sym: LNAME. (157)
+ packname: LNAME. (236)
+ packname: LNAME.'.' sym
+
+ LLITERAL reduce 236 (src line 1621)
+ ';' reduce 236 (src line 1621)
+ '.' shift 434
+ '}' reduce 236 (src line 1621)
+ . reduce 157 (src line 1113)
+
+
+state 326
+ interfacetype: LINTERFACE lbrace interfacedcl_list.osemi '}'
+ interfacedcl_list: interfacedcl_list.';' interfacedcl
+ osemi: . (286)
+
+ ';' shift 436
+ . reduce 286 (src line 1893)
+
+ osemi goto 435
+
+state 327
+ interfacetype: LINTERFACE lbrace '}'. (203)
+
+ . reduce 203 (src line 1308)
+
+
+state 328
+ interfacedcl_list: interfacedcl. (228)
+
+ . reduce 228 (src line 1554)
+
+
+state 329
+ interfacedcl: new_name.indcl
+
+ '(' shift 438
+ . error
+
+ indcl goto 437
+
+state 330
+ interfacedcl: packname. (240)
+
+ . reduce 240 (src line 1658)
+
+
+state 331
+ interfacedcl: '('.packname ')'
+
+ LNAME shift 431
+ . error
+
+ packname goto 439
+
+state 332
+ import: LIMPORT '(' import_stmt_list osemi.')'
+
+ ')' shift 440
+ . error
+
+
+state 333
+ import_stmt_list: import_stmt_list ';'.import_stmt
+ osemi: ';'. (287)
+
+ LLITERAL shift 88
+ LNAME shift 10
+ '.' shift 90
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ import_here goto 87
+ sym goto 89
+ hidden_importsym goto 11
+ import_stmt goto 441
+
+state 334
+ import_stmt: import_here import_package import_there. (11)
+
+ . reduce 11 (src line 173)
+
+
+state 335
+ hidden_import: LIMPORT LNAME LLITERAL.';'
+
+ ';' shift 442
+ . error
+
+
+state 336
+ hidden_import: LVAR hidden_pkg_importsym hidden_type.';'
+
+ ';' shift 443
+ . error
+
+
+state 337
+ hidden_type: hidden_type_misc. (312)
+
+ . reduce 312 (src line 2003)
+
+
+state 338
+ hidden_type: hidden_type_recv_chan. (313)
+
+ . reduce 313 (src line 2005)
+
+
+state 339
+ hidden_type: hidden_type_func. (314)
+
+ . reduce 314 (src line 2006)
+
+
+state 340
+ hidden_type_misc: hidden_importsym. (317)
+
+ . reduce 317 (src line 2012)
+
+
+state 341
+ hidden_type_misc: LNAME. (318)
+
+ . reduce 318 (src line 2017)
+
+
+state 342
+ hidden_type_misc: '['.']' hidden_type
+ hidden_type_misc: '['.LLITERAL ']' hidden_type
+
+ LLITERAL shift 445
+ ']' shift 444
+ . error
+
+
+state 343
+ hidden_type_misc: LMAP.'[' hidden_type ']' hidden_type
+
+ '[' shift 446
+ . error
+
+
+state 344
+ hidden_type_misc: LSTRUCT.'{' ohidden_structdcl_list '}'
+
+ '{' shift 447
+ . error
+
+
+state 345
+ hidden_type_misc: LINTERFACE.'{' ohidden_interfacedcl_list '}'
+
+ '{' shift 448
+ . error
+
+
+state 346
+ hidden_type_misc: '*'.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 449
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 347
+ hidden_type_misc: LCHAN.hidden_type_non_recv_chan
+ hidden_type_misc: LCHAN.'(' hidden_type_recv_chan ')'
+ hidden_type_misc: LCHAN.LCOMM hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 452
+ '*' shift 346
+ '(' shift 451
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type_misc goto 453
+ hidden_type_func goto 454
+ hidden_type_non_recv_chan goto 450
+
+state 348
+ hidden_type_recv_chan: LCOMM.LCHAN hidden_type
+
+ LCHAN shift 455
+ . error
+
+
+state 349
+ hidden_type_func: LFUNC.'(' ohidden_funarg_list ')' ohidden_funres
+
+ '(' shift 456
+ . error
+
+
+state 350
+ hidden_import: LCONST hidden_pkg_importsym '='.hidden_constant ';'
+
+ LLITERAL shift 460
+ LNAME shift 10
+ '-' shift 461
+ '(' shift 459
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 462
+ hidden_importsym goto 11
+ hidden_constant goto 457
+ hidden_literal goto 458
+
+state 351
+ hidden_import: LCONST hidden_pkg_importsym hidden_type.'=' hidden_constant ';'
+
+ '=' shift 463
+ . error
+
+
+state 352
+ hidden_import: LTYPE hidden_pkgtype hidden_type.';'
+
+ ';' shift 464
+ . error
+
+
+state 353
+ hidden_import: LFUNC hidden_fndcl fnbody.';'
+
+ ';' shift 465
+ . error
+
+
+state 354
+ hidden_fndcl: hidden_pkg_importsym '('.ohidden_funarg_list ')' ohidden_funres
+ ohidden_funarg_list: . (296)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 296 (src line 1917)
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 467
+ ohidden_funarg_list goto 466
+
+state 355
+ hidden_fndcl: '(' hidden_funarg_list.')' sym '(' ohidden_funarg_list ')' ohidden_funres
+ hidden_funarg_list: hidden_funarg_list.',' hidden_funarg
+
+ ')' shift 468
+ ',' shift 469
+ . error
+
+
+state 356
+ hidden_funarg_list: hidden_funarg. (346)
+
+ . reduce 346 (src line 2212)
+
+
+state 357
+ hidden_funarg: sym.hidden_type oliteral
+ hidden_funarg: sym.LDDD hidden_type oliteral
+
+ LCHAN shift 347
+ LDDD shift 471
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 470
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 358
+ common_dcl: LVAR '(' vardcl_list osemi.')'
+
+ ')' shift 472
+ . error
+
+
+state 359
+ vardcl_list: vardcl_list ';'.vardcl
+ osemi: ';'. (287)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 103
+ vardcl goto 473
+ hidden_importsym goto 11
+
+state 360
+ vardcl: dcl_name_list ntype '='.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 474
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 361
+ vardcl: dcl_name_list '=' expr_list. (41)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 41 (src line 369)
+
+
+state 362
+ dcl_name_list: dcl_name_list ',' dcl_name. (275)
+
+ . reduce 275 (src line 1835)
+
+
+state 363
+ ntype: '(' ntype.')'
+
+ ')' shift 475
+ . error
+
+
+state 364
+ recvchantype: LCOMM LCHAN.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 406
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 365
+ common_dcl: lconst '(' constdcl osemi.')'
+
+ ')' shift 476
+ . error
+
+
+state 366
+ common_dcl: lconst '(' constdcl ';'.constdcl_list osemi ')'
+ osemi: ';'. (287)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 480
+ constdcl goto 479
+ constdcl1 goto 478
+ constdcl_list goto 477
+ hidden_importsym goto 11
+
+state 367
+ constdcl: dcl_name_list ntype '='.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 481
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 368
+ constdcl: dcl_name_list '=' expr_list. (43)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 43 (src line 379)
+
+
+state 369
+ common_dcl: LTYPE '(' typedcl_list osemi.')'
+
+ ')' shift 482
+ . error
+
+
+state 370
+ typedcl_list: typedcl_list ';'.typedcl
+ osemi: ';'. (287)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 112
+ typedclname goto 111
+ typedcl goto 483
+ hidden_importsym goto 11
+
+state 371
+ fnbody: '{' stmt_list.'}'
+ stmt_list: stmt_list.';' stmt
+
+ ';' shift 416
+ '}' shift 484
+ . error
+
+
+state 372
+ fndcl: '(' oarg_type_list_ocomma ')'.sym '(' oarg_type_list_ocomma ')' fnres
+ fntype: LFUNC '(' oarg_type_list_ocomma ')'.fnres
+ fnres: . (212)
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 488
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 212 (src line 1469)
+
+ sym goto 485
+ dotname goto 493
+ name goto 197
+ fnres goto 486
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 492
+ recvchantype goto 489
+ othertype goto 491
+ fnret_type goto 487
+ fntype goto 490
+ hidden_importsym goto 11
+
+state 373
+ arg_type_list: arg_type_list ','.arg_type
+ ocomma: ','. (289)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 289 (src line 1897)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 494
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 374
+ oarg_type_list_ocomma: arg_type_list ocomma. (250)
+
+ . reduce 250 (src line 1711)
+
+
+state 375
+ arg_type: sym name_or_type. (244)
+
+ . reduce 244 (src line 1683)
+
+
+state 376
+ arg_type: sym dotdotdot. (245)
+
+ . reduce 245 (src line 1689)
+
+
+state 377
+ dotdotdot: LDDD ntype. (165)
+
+ . reduce 165 (src line 1185)
+
+
+state 378
+ fndcl: sym '(' oarg_type_list_ocomma.')' fnres
+
+ ')' shift 495
+ . error
+
+
+state 379
+ non_dcl_stmt: labelname ':' $$261 stmt. (262)
+
+ . reduce 262 (src line 1745)
+
+
+state 380
+ fntype: LFUNC '(' oarg_type_list_ocomma.')' fnres
+
+ ')' shift 496
+ . error
+
+
+state 381
+ for_body: for_header loop_body. (73)
+
+ . reduce 73 (src line 652)
+
+
+state 382
+ loop_body: LBODY.$$65 stmt_list '}'
+ $$65: . (65)
+
+ . reduce 65 (src line 599)
+
+ $$65 goto 497
+
+state 383
+ for_header: osimple_stmt ';'.osimple_stmt ';' osimple_stmt
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ name goto 69
+ osimple_stmt goto 498
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 384
+ simple_stmt: expr_list '='.expr_list
+ range_stmt: expr_list '='.LRANGE expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRANGE shift 499
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 275
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 385
+ simple_stmt: expr_list LCOLAS.expr_list
+ range_stmt: expr_list LCOLAS.LRANGE expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRANGE shift 500
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 276
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 386
+ range_stmt: LRANGE expr. (69)
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 69 (src line 624)
+
+
+state 387
+ switch_stmt: LSWITCH $$88 if_header $$89.LBODY caseblock_list '}'
+
+ LBODY shift 501
+ . error
+
+
+state 388
+ if_header: osimple_stmt ';'.osimple_stmt
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ name goto 69
+ osimple_stmt goto 502
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 389
+ caseblock_list: caseblock_list.caseblock
+ select_stmt: LSELECT $$91 LBODY caseblock_list.'}'
+
+ LCASE shift 506
+ LDEFAULT shift 507
+ '}' shift 504
+ . error
+
+ case goto 505
+ caseblock goto 503
+
+state 390
+ if_stmt: LIF $$78 if_header $$79.loop_body $$80 elseif_list else
+
+ LBODY shift 382
+ . error
+
+ loop_body goto 508
+
+state 391
+ pseudocall: pexpr '(' expr_or_type_list ocomma.')'
+
+ ')' shift 509
+ . error
+
+
+state 392
+ pseudocall: pexpr '(' expr_or_type_list LDDD.ocomma ')'
+ ocomma: . (288)
+
+ ',' shift 413
+ . reduce 288 (src line 1896)
+
+ ocomma goto 510
+
+state 393
+ expr_or_type_list: expr_or_type_list ','.expr_or_type
+ ocomma: ','. (289)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 289 (src line 1897)
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 511
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 394
+ pexpr_no_paren: pexpr '.' '(' expr_or_type.')'
+
+ ')' shift 512
+ . error
+
+
+state 395
+ pexpr_no_paren: pexpr '.' '(' LTYPE.')'
+
+ ')' shift 513
+ . error
+
+
+state 396
+ pexpr_no_paren: pexpr '[' expr ']'. (131)
+
+ . reduce 131 (src line 966)
+
+
+state 397
+ pexpr_no_paren: pexpr '[' oexpr ':'.oexpr ']'
+ pexpr_no_paren: pexpr '[' oexpr ':'.oexpr ':' oexpr ']'
+ oexpr: . (290)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 290 (src line 1899)
+
+ sym goto 123
+ expr goto 188
+ fnliteral goto 73
+ name goto 69
+ oexpr goto 514
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 398
+ pexpr_no_paren: pexpr_no_paren '{' start_complit braced_keyval_list.'}'
+
+ '}' shift 515
+ . error
+
+
+state 399
+ keyval_list: keyval_list.',' keyval
+ keyval_list: keyval_list.',' bare_complitexpr
+ braced_keyval_list: keyval_list.ocomma
+ ocomma: . (288)
+
+ ',' shift 516
+ . reduce 288 (src line 1896)
+
+ ocomma goto 517
+
+state 400
+ keyval_list: keyval. (280)
+
+ . reduce 280 (src line 1863)
+
+
+state 401
+ keyval_list: bare_complitexpr. (281)
+
+ . reduce 281 (src line 1868)
+
+
+state 402
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ keyval: expr.':' complitexpr
+ bare_complitexpr: expr. (142)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ ':' shift 518
+ . reduce 142 (src line 1026)
+
+
+state 403
+ bare_complitexpr: '{'.start_complit braced_keyval_list '}'
+ start_complit: . (140)
+
+ . reduce 140 (src line 1013)
+
+ start_complit goto 519
+
+state 404
+ pexpr_no_paren: '(' expr_or_type ')' '{'.start_complit braced_keyval_list '}'
+ start_complit: . (140)
+
+ . reduce 140 (src line 1013)
+
+ start_complit goto 520
+
+state 405
+ othertype: LCHAN LCOMM.ntype
+ recvchantype: LCOMM.LCHAN ntype
+
+ LCHAN shift 299
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 311
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 406
+ recvchantype: LCOMM LCHAN ntype. (199)
+
+ . reduce 199 (src line 1281)
+
+
+state 407
+ ntype: fntype. (167)
+ non_recvchantype: fntype. (176)
+
+ LBODY reduce 176 (src line 1210)
+ '(' reduce 176 (src line 1210)
+ '{' reduce 176 (src line 1210)
+ . reduce 167 (src line 1192)
+
+
+state 408
+ ntype: othertype. (168)
+ non_recvchantype: othertype. (177)
+
+ LBODY reduce 177 (src line 1212)
+ '(' reduce 177 (src line 1212)
+ '{' reduce 177 (src line 1212)
+ . reduce 168 (src line 1193)
+
+
+state 409
+ ntype: ptrtype. (169)
+ non_recvchantype: ptrtype. (178)
+
+ LBODY reduce 178 (src line 1213)
+ '(' reduce 178 (src line 1213)
+ '{' reduce 178 (src line 1213)
+ . reduce 169 (src line 1194)
+
+
+state 410
+ ntype: dotname. (170)
+ non_recvchantype: dotname. (179)
+
+ LBODY reduce 179 (src line 1214)
+ '(' reduce 179 (src line 1214)
+ '{' reduce 179 (src line 1214)
+ . reduce 170 (src line 1195)
+
+
+state 411
+ ntype: '('.ntype ')'
+ non_recvchantype: '('.ntype ')'
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 521
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 412
+ pexpr_no_paren: convtype '(' expr ocomma.')'
+
+ ')' shift 522
+ . error
+
+
+state 413
+ ocomma: ','. (289)
+
+ . reduce 289 (src line 1897)
+
+
+state 414
+ pexpr_no_paren: comptype lbrace start_complit braced_keyval_list.'}'
+
+ '}' shift 523
+ . error
+
+
+state 415
+ fnliteral: fnlitdcl lbrace stmt_list '}'. (216)
+
+ . reduce 216 (src line 1490)
+
+
+state 416
+ stmt_list: stmt_list ';'.stmt
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCASE reduce 251 (src line 1719)
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFAULT reduce 251 (src line 1719)
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 524
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 417
+ compound_stmt: '{' $$59.stmt_list '}'
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 303
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ stmt_list goto 525
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 418
+ othertype: '[' oexpr ']' ntype. (191)
+
+ . reduce 191 (src line 1248)
+
+
+state 419
+ othertype: '[' LDDD ']' ntype. (192)
+
+ . reduce 192 (src line 1253)
+
+
+state 420
+ non_recvchantype: '(' ntype ')'. (180)
+
+ . reduce 180 (src line 1215)
+
+
+state 421
+ dotname: name '.' sym. (190)
+
+ . reduce 190 (src line 1236)
+
+
+state 422
+ othertype: LMAP '[' ntype ']'.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 526
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 423
+ structtype: LSTRUCT lbrace structdcl_list osemi.'}'
+
+ '}' shift 527
+ . error
+
+
+state 424
+ structdcl_list: structdcl_list ';'.structdcl
+ osemi: ';'. (287)
+
+ LNAME shift 325
+ '*' shift 322
+ '(' shift 321
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 119
+ packname goto 324
+ embed goto 320
+ new_name goto 323
+ new_name_list goto 319
+ structdcl goto 528
+ hidden_importsym goto 11
+
+state 425
+ structdcl: new_name_list ntype.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 529
+
+state 426
+ new_name_list: new_name_list ','.new_name
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 119
+ new_name goto 530
+ hidden_importsym goto 11
+
+state 427
+ structdcl: embed oliteral. (231)
+
+ . reduce 231 (src line 1589)
+
+
+state 428
+ oliteral: LLITERAL. (303)
+
+ . reduce 303 (src line 1939)
+
+
+state 429
+ structdcl: '(' embed.')' oliteral
+
+ ')' shift 531
+ . error
+
+
+state 430
+ structdcl: '(' '*'.embed ')' oliteral
+
+ LNAME shift 431
+ . error
+
+ packname goto 324
+ embed goto 532
+
+state 431
+ packname: LNAME. (236)
+ packname: LNAME.'.' sym
+
+ '.' shift 434
+ . reduce 236 (src line 1621)
+
+
+state 432
+ structdcl: '*' embed.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 533
+
+state 433
+ structdcl: '*' '('.embed ')' oliteral
+
+ LNAME shift 431
+ . error
+
+ packname goto 324
+ embed goto 534
+
+state 434
+ packname: LNAME '.'.sym
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 535
+ hidden_importsym goto 11
+
+state 435
+ interfacetype: LINTERFACE lbrace interfacedcl_list osemi.'}'
+
+ '}' shift 536
+ . error
+
+
+state 436
+ interfacedcl_list: interfacedcl_list ';'.interfacedcl
+ osemi: ';'. (287)
+
+ LNAME shift 325
+ '(' shift 331
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 119
+ packname goto 330
+ interfacedcl goto 537
+ new_name goto 329
+ hidden_importsym goto 11
+
+state 437
+ interfacedcl: new_name indcl. (239)
+
+ . reduce 239 (src line 1652)
+
+
+state 438
+ indcl: '('.oarg_type_list_ocomma ')' fnres
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 538
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 439
+ interfacedcl: '(' packname.')'
+
+ ')' shift 539
+ . error
+
+
+state 440
+ import: LIMPORT '(' import_stmt_list osemi ')'. (9)
+
+ . reduce 9 (src line 170)
+
+
+state 441
+ import_stmt_list: import_stmt_list ';' import_stmt. (14)
+
+ . reduce 14 (src line 222)
+
+
+state 442
+ hidden_import: LIMPORT LNAME LLITERAL ';'. (304)
+
+ . reduce 304 (src line 1944)
+
+
+state 443
+ hidden_import: LVAR hidden_pkg_importsym hidden_type ';'. (305)
+
+ . reduce 305 (src line 1949)
+
+
+state 444
+ hidden_type_misc: '[' ']'.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 540
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 445
+ hidden_type_misc: '[' LLITERAL.']' hidden_type
+
+ ']' shift 541
+ . error
+
+
+state 446
+ hidden_type_misc: LMAP '['.hidden_type ']' hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 542
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 447
+ hidden_type_misc: LSTRUCT '{'.ohidden_structdcl_list '}'
+ ohidden_structdcl_list: . (298)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 298 (src line 1923)
+
+ sym goto 546
+ hidden_importsym goto 11
+ hidden_structdcl goto 545
+ hidden_structdcl_list goto 544
+ ohidden_structdcl_list goto 543
+
+state 448
+ hidden_type_misc: LINTERFACE '{'.ohidden_interfacedcl_list '}'
+ ohidden_interfacedcl_list: . (300)
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 552
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '?' shift 12
+ '@' shift 13
+ . reduce 300 (src line 1929)
+
+ sym goto 550
+ hidden_importsym goto 553
+ hidden_interfacedcl goto 549
+ hidden_interfacedcl_list goto 548
+ ohidden_interfacedcl_list goto 547
+ hidden_type goto 551
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 449
+ hidden_type_misc: '*' hidden_type. (324)
+
+ . reduce 324 (src line 2048)
+
+
+state 450
+ hidden_type_misc: LCHAN hidden_type_non_recv_chan. (325)
+
+ . reduce 325 (src line 2052)
+
+
+state 451
+ hidden_type_misc: LCHAN '('.hidden_type_recv_chan ')'
+
+ LCOMM shift 348
+ . error
+
+ hidden_type_recv_chan goto 554
+
+state 452
+ hidden_type_misc: LCHAN LCOMM.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 555
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 453
+ hidden_type_non_recv_chan: hidden_type_misc. (315)
+
+ . reduce 315 (src line 2008)
+
+
+state 454
+ hidden_type_non_recv_chan: hidden_type_func. (316)
+
+ . reduce 316 (src line 2010)
+
+
+state 455
+ hidden_type_recv_chan: LCOMM LCHAN.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 556
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 456
+ hidden_type_func: LFUNC '('.ohidden_funarg_list ')' ohidden_funres
+ ohidden_funarg_list: . (296)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 296 (src line 1917)
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 467
+ ohidden_funarg_list goto 557
+
+state 457
+ hidden_import: LCONST hidden_pkg_importsym '=' hidden_constant.';'
+
+ ';' shift 558
+ . error
+
+
+state 458
+ hidden_constant: hidden_literal. (342)
+
+ . reduce 342 (src line 2195)
+
+
+state 459
+ hidden_constant: '('.hidden_literal '+' hidden_literal ')'
+
+ LLITERAL shift 460
+ LNAME shift 10
+ '-' shift 461
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 462
+ hidden_importsym goto 11
+ hidden_literal goto 559
+
+state 460
+ hidden_literal: LLITERAL. (339)
+
+ . reduce 339 (src line 2164)
+
+
+state 461
+ hidden_literal: '-'.LLITERAL
+
+ LLITERAL shift 560
+ . error
+
+
+state 462
+ hidden_literal: sym. (341)
+
+ . reduce 341 (src line 2187)
+
+
+state 463
+ hidden_import: LCONST hidden_pkg_importsym hidden_type '='.hidden_constant ';'
+
+ LLITERAL shift 460
+ LNAME shift 10
+ '-' shift 461
+ '(' shift 459
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 462
+ hidden_importsym goto 11
+ hidden_constant goto 561
+ hidden_literal goto 458
+
+state 464
+ hidden_import: LTYPE hidden_pkgtype hidden_type ';'. (308)
+
+ . reduce 308 (src line 1961)
+
+
+state 465
+ hidden_import: LFUNC hidden_fndcl fnbody ';'. (309)
+
+ . reduce 309 (src line 1965)
+
+
+state 466
+ hidden_fndcl: hidden_pkg_importsym '(' ohidden_funarg_list.')' ohidden_funres
+
+ ')' shift 562
+ . error
+
+
+state 467
+ ohidden_funarg_list: hidden_funarg_list. (297)
+ hidden_funarg_list: hidden_funarg_list.',' hidden_funarg
+
+ ',' shift 469
+ . reduce 297 (src line 1921)
+
+
+state 468
+ hidden_fndcl: '(' hidden_funarg_list ')'.sym '(' ohidden_funarg_list ')' ohidden_funres
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 563
+ hidden_importsym goto 11
+
+state 469
+ hidden_funarg_list: hidden_funarg_list ','.hidden_funarg
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 564
+
+state 470
+ hidden_funarg: sym hidden_type.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 565
+
+state 471
+ hidden_funarg: sym LDDD.hidden_type oliteral
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 566
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 472
+ common_dcl: LVAR '(' vardcl_list osemi ')'. (29)
+
+ . reduce 29 (src line 310)
+
+
+state 473
+ vardcl_list: vardcl_list ';' vardcl. (221)
+
+ . reduce 221 (src line 1525)
+
+
+state 474
+ vardcl: dcl_name_list ntype '=' expr_list. (40)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 40 (src line 365)
+
+
+state 475
+ ntype: '(' ntype ')'. (171)
+
+ . reduce 171 (src line 1196)
+
+
+state 476
+ common_dcl: lconst '(' constdcl osemi ')'. (32)
+
+ . reduce 32 (src line 324)
+
+
+state 477
+ common_dcl: lconst '(' constdcl ';' constdcl_list.osemi ')'
+ constdcl_list: constdcl_list.';' constdcl1
+ osemi: . (286)
+
+ ';' shift 568
+ . reduce 286 (src line 1893)
+
+ osemi goto 567
+
+state 478
+ constdcl_list: constdcl1. (222)
+
+ . reduce 222 (src line 1530)
+
+
+state 479
+ constdcl1: constdcl. (44)
+
+ . reduce 44 (src line 384)
+
+
+state 480
+ constdcl: dcl_name_list.ntype '=' expr_list
+ constdcl: dcl_name_list.'=' expr_list
+ constdcl1: dcl_name_list.ntype
+ constdcl1: dcl_name_list. (46)
+ dcl_name_list: dcl_name_list.',' dcl_name
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '=' shift 236
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ ',' shift 225
+ . reduce 46 (src line 390)
+
+ sym goto 123
+ ntype goto 569
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 481
+ constdcl: dcl_name_list ntype '=' expr_list. (42)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 42 (src line 374)
+
+
+state 482
+ common_dcl: LTYPE '(' typedcl_list osemi ')'. (36)
+
+ . reduce 36 (src line 345)
+
+
+state 483
+ typedcl_list: typedcl_list ';' typedcl. (225)
+
+ . reduce 225 (src line 1542)
+
+
+state 484
+ fnbody: '{' stmt_list '}'. (211)
+
+ . reduce 211 (src line 1461)
+
+
+state 485
+ name: sym. (162)
+ fndcl: '(' oarg_type_list_ocomma ')' sym.'(' oarg_type_list_ocomma ')' fnres
+
+ '(' shift 570
+ . reduce 162 (src line 1158)
+
+
+state 486
+ fntype: LFUNC '(' oarg_type_list_ocomma ')' fnres. (209)
+
+ . reduce 209 (src line 1448)
+
+
+state 487
+ fnres: fnret_type. (213)
+
+ . reduce 213 (src line 1474)
+
+
+state 488
+ fnres: '('.oarg_type_list_ocomma ')'
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 571
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 489
+ fnret_type: recvchantype. (184)
+
+ . reduce 184 (src line 1227)
+
+
+state 490
+ fnret_type: fntype. (185)
+
+ . reduce 185 (src line 1229)
+
+
+state 491
+ fnret_type: othertype. (186)
+
+ . reduce 186 (src line 1230)
+
+
+state 492
+ fnret_type: ptrtype. (187)
+
+ . reduce 187 (src line 1231)
+
+
+state 493
+ fnret_type: dotname. (188)
+
+ . reduce 188 (src line 1232)
+
+
+state 494
+ arg_type_list: arg_type_list ',' arg_type. (248)
+
+ . reduce 248 (src line 1702)
+
+
+state 495
+ fndcl: sym '(' oarg_type_list_ocomma ')'.fnres
+ fnres: . (212)
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 488
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 212 (src line 1469)
+
+ sym goto 123
+ dotname goto 493
+ name goto 197
+ fnres goto 572
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 492
+ recvchantype goto 489
+ othertype goto 491
+ fnret_type goto 487
+ fntype goto 490
+ hidden_importsym goto 11
+
+state 496
+ fntype: LFUNC '(' oarg_type_list_ocomma ')'.fnres
+ fnres: . (212)
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 488
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 212 (src line 1469)
+
+ sym goto 123
+ dotname goto 493
+ name goto 197
+ fnres goto 486
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 492
+ recvchantype goto 489
+ othertype goto 491
+ fnret_type goto 487
+ fntype goto 490
+ hidden_importsym goto 11
+
+state 497
+ loop_body: LBODY $$65.stmt_list '}'
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 303
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ stmt_list goto 573
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 498
+ for_header: osimple_stmt ';' osimple_stmt.';' osimple_stmt
+
+ ';' shift 574
+ . error
+
+
+state 499
+ range_stmt: expr_list '=' LRANGE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 575
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 500
+ range_stmt: expr_list LCOLAS LRANGE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 576
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 501
+ switch_stmt: LSWITCH $$88 if_header $$89 LBODY.caseblock_list '}'
+ caseblock_list: . (63)
+
+ . reduce 63 (src line 590)
+
+ caseblock_list goto 577
+
+state 502
+ if_header: osimple_stmt ';' osimple_stmt. (77)
+
+ . reduce 77 (src line 677)
+
+
+state 503
+ caseblock_list: caseblock_list caseblock. (64)
+
+ . reduce 64 (src line 594)
+
+
+state 504
+ select_stmt: LSELECT $$91 LBODY caseblock_list '}'. (92)
+
+ . reduce 92 (src line 782)
+
+
+state 505
+ caseblock: case.$$61 stmt_list
+ $$61: . (61)
+
+ . reduce 61 (src line 559)
+
+ $$61 goto 578
+
+state 506
+ case: LCASE.expr_or_type_list ':'
+ case: LCASE.expr_or_type_list '=' expr ':'
+ case: LCASE.expr_or_type_list LCOLAS expr ':'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 291
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_or_type_list goto 579
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 507
+ case: LDEFAULT.':'
+
+ ':' shift 580
+ . error
+
+
+state 508
+ if_stmt: LIF $$78 if_header $$79 loop_body.$$80 elseif_list else
+ $$80: . (80)
+
+ . reduce 80 (src line 699)
+
+ $$80 goto 581
+
+state 509
+ pseudocall: pexpr '(' expr_or_type_list ocomma ')'. (124)
+
+ . reduce 124 (src line 929)
+
+
+state 510
+ pseudocall: pexpr '(' expr_or_type_list LDDD ocomma.')'
+
+ ')' shift 582
+ . error
+
+
+state 511
+ expr_or_type_list: expr_or_type_list ',' expr_or_type. (279)
+
+ . reduce 279 (src line 1855)
+
+
+state 512
+ pexpr_no_paren: pexpr '.' '(' expr_or_type ')'. (129)
+
+ . reduce 129 (src line 958)
+
+
+state 513
+ pexpr_no_paren: pexpr '.' '(' LTYPE ')'. (130)
+
+ . reduce 130 (src line 962)
+
+
+state 514
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr.']'
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr.':' oexpr ']'
+
+ ':' shift 584
+ ']' shift 583
+ . error
+
+
+state 515
+ pexpr_no_paren: pexpr_no_paren '{' start_complit braced_keyval_list '}'. (137)
+
+ . reduce 137 (src line 998)
+
+
+state 516
+ keyval_list: keyval_list ','.keyval
+ keyval_list: keyval_list ','.bare_complitexpr
+ ocomma: ','. (289)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 289 (src line 1897)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 586
+ fnliteral goto 73
+ keyval goto 585
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 517
+ braced_keyval_list: keyval_list ocomma. (285)
+
+ . reduce 285 (src line 1885)
+
+
+state 518
+ keyval: expr ':'.complitexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 589
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 588
+ complitexpr goto 587
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 519
+ bare_complitexpr: '{' start_complit.braced_keyval_list '}'
+ braced_keyval_list: . (284)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 284 (src line 1881)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 401
+ fnliteral goto 73
+ keyval goto 400
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ keyval_list goto 399
+ braced_keyval_list goto 590
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 520
+ pexpr_no_paren: '(' expr_or_type ')' '{' start_complit.braced_keyval_list '}'
+ braced_keyval_list: . (284)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 284 (src line 1881)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 401
+ fnliteral goto 73
+ keyval goto 400
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ keyval_list goto 399
+ braced_keyval_list goto 591
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 521
+ ntype: '(' ntype.')'
+ non_recvchantype: '(' ntype.')'
+
+ ')' shift 592
+ . error
+
+
+state 522
+ pexpr_no_paren: convtype '(' expr ocomma ')'. (135)
+
+ . reduce 135 (src line 985)
+
+
+state 523
+ pexpr_no_paren: comptype lbrace start_complit braced_keyval_list '}'. (136)
+
+ . reduce 136 (src line 991)
+
+
+state 524
+ stmt_list: stmt_list ';' stmt. (271)
+
+ . reduce 271 (src line 1812)
+
+
+state 525
+ compound_stmt: '{' $$59 stmt_list.'}'
+ stmt_list: stmt_list.';' stmt
+
+ ';' shift 416
+ '}' shift 593
+ . error
+
+
+state 526
+ othertype: LMAP '[' ntype ']' ntype. (195)
+
+ . reduce 195 (src line 1268)
+
+
+state 527
+ structtype: LSTRUCT lbrace structdcl_list osemi '}'. (200)
+
+ . reduce 200 (src line 1288)
+
+
+state 528
+ structdcl_list: structdcl_list ';' structdcl. (227)
+
+ . reduce 227 (src line 1549)
+
+
+state 529
+ structdcl: new_name_list ntype oliteral. (230)
+
+ . reduce 230 (src line 1564)
+
+
+state 530
+ new_name_list: new_name_list ',' new_name. (273)
+
+ . reduce 273 (src line 1825)
+
+
+state 531
+ structdcl: '(' embed ')'.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 594
+
+state 532
+ structdcl: '(' '*' embed.')' oliteral
+
+ ')' shift 595
+ . error
+
+
+state 533
+ structdcl: '*' embed oliteral. (233)
+
+ . reduce 233 (src line 1600)
+
+
+state 534
+ structdcl: '*' '(' embed.')' oliteral
+
+ ')' shift 596
+ . error
+
+
+state 535
+ packname: LNAME '.' sym. (237)
+
+ . reduce 237 (src line 1632)
+
+
+state 536
+ interfacetype: LINTERFACE lbrace interfacedcl_list osemi '}'. (202)
+
+ . reduce 202 (src line 1301)
+
+
+state 537
+ interfacedcl_list: interfacedcl_list ';' interfacedcl. (229)
+
+ . reduce 229 (src line 1559)
+
+
+state 538
+ indcl: '(' oarg_type_list_ocomma.')' fnres
+
+ ')' shift 597
+ . error
+
+
+state 539
+ interfacedcl: '(' packname ')'. (241)
+
+ . reduce 241 (src line 1662)
+
+
+state 540
+ hidden_type_misc: '[' ']' hidden_type. (319)
+
+ . reduce 319 (src line 2028)
+
+
+state 541
+ hidden_type_misc: '[' LLITERAL ']'.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 598
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 542
+ hidden_type_misc: LMAP '[' hidden_type.']' hidden_type
+
+ ']' shift 599
+ . error
+
+
+state 543
+ hidden_type_misc: LSTRUCT '{' ohidden_structdcl_list.'}'
+
+ '}' shift 600
+ . error
+
+
+state 544
+ ohidden_structdcl_list: hidden_structdcl_list. (299)
+ hidden_structdcl_list: hidden_structdcl_list.';' hidden_structdcl
+
+ ';' shift 601
+ . reduce 299 (src line 1927)
+
+
+state 545
+ hidden_structdcl_list: hidden_structdcl. (348)
+
+ . reduce 348 (src line 2222)
+
+
+state 546
+ hidden_structdcl: sym.hidden_type oliteral
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 602
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 547
+ hidden_type_misc: LINTERFACE '{' ohidden_interfacedcl_list.'}'
+
+ '}' shift 603
+ . error
+
+
+state 548
+ ohidden_interfacedcl_list: hidden_interfacedcl_list. (301)
+ hidden_interfacedcl_list: hidden_interfacedcl_list.';' hidden_interfacedcl
+
+ ';' shift 604
+ . reduce 301 (src line 1933)
+
+
+state 549
+ hidden_interfacedcl_list: hidden_interfacedcl. (350)
+
+ . reduce 350 (src line 2232)
+
+
+state 550
+ hidden_interfacedcl: sym.'(' ohidden_funarg_list ')' ohidden_funres
+
+ '(' shift 605
+ . error
+
+
+state 551
+ hidden_interfacedcl: hidden_type. (334)
+
+ . reduce 334 (src line 2139)
+
+
+state 552
+ sym: LNAME. (157)
+ hidden_type_misc: LNAME. (318)
+
+ '(' reduce 157 (src line 1113)
+ . reduce 318 (src line 2017)
+
+
+state 553
+ sym: hidden_importsym. (158)
+ hidden_type_misc: hidden_importsym. (317)
+
+ '(' reduce 158 (src line 1122)
+ . reduce 317 (src line 2012)
+
+
+state 554
+ hidden_type_misc: LCHAN '(' hidden_type_recv_chan.')'
+
+ ')' shift 606
+ . error
+
+
+state 555
+ hidden_type_misc: LCHAN LCOMM hidden_type. (327)
+
+ . reduce 327 (src line 2064)
+
+
+state 556
+ hidden_type_recv_chan: LCOMM LCHAN hidden_type. (328)
+
+ . reduce 328 (src line 2071)
+
+
+state 557
+ hidden_type_func: LFUNC '(' ohidden_funarg_list.')' ohidden_funres
+
+ ')' shift 607
+ . error
+
+
+state 558
+ hidden_import: LCONST hidden_pkg_importsym '=' hidden_constant ';'. (306)
+
+ . reduce 306 (src line 1953)
+
+
+state 559
+ hidden_constant: '(' hidden_literal.'+' hidden_literal ')'
+
+ '+' shift 608
+ . error
+
+
+state 560
+ hidden_literal: '-' LLITERAL. (340)
+
+ . reduce 340 (src line 2169)
+
+
+state 561
+ hidden_import: LCONST hidden_pkg_importsym hidden_type '=' hidden_constant.';'
+
+ ';' shift 609
+ . error
+
+
+state 562
+ hidden_fndcl: hidden_pkg_importsym '(' ohidden_funarg_list ')'.ohidden_funres
+ ohidden_funres: . (335)
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '(' shift 612
+ '[' shift 342
+ '@' shift 13
+ . reduce 335 (src line 2144)
+
+ hidden_importsym goto 340
+ hidden_funres goto 611
+ ohidden_funres goto 610
+ hidden_type goto 613
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 563
+ hidden_fndcl: '(' hidden_funarg_list ')' sym.'(' ohidden_funarg_list ')' ohidden_funres
+
+ '(' shift 614
+ . error
+
+
+state 564
+ hidden_funarg_list: hidden_funarg_list ',' hidden_funarg. (347)
+
+ . reduce 347 (src line 2217)
+
+
+state 565
+ hidden_funarg: sym hidden_type oliteral. (330)
+
+ . reduce 330 (src line 2085)
+
+
+state 566
+ hidden_funarg: sym LDDD hidden_type.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 615
+
+state 567
+ common_dcl: lconst '(' constdcl ';' constdcl_list osemi.')'
+
+ ')' shift 616
+ . error
+
+
+state 568
+ constdcl_list: constdcl_list ';'.constdcl1
+ osemi: ';'. (287)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 480
+ constdcl goto 479
+ constdcl1 goto 617
+ hidden_importsym goto 11
+
+state 569
+ constdcl: dcl_name_list ntype.'=' expr_list
+ constdcl1: dcl_name_list ntype. (45)
+
+ '=' shift 367
+ . reduce 45 (src line 386)
+
+
+state 570
+ fndcl: '(' oarg_type_list_ocomma ')' sym '('.oarg_type_list_ocomma ')' fnres
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 618
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 571
+ fnres: '(' oarg_type_list_ocomma.')'
+
+ ')' shift 619
+ . error
+
+
+state 572
+ fndcl: sym '(' oarg_type_list_ocomma ')' fnres. (205)
+
+ . reduce 205 (src line 1336)
+
+
+state 573
+ loop_body: LBODY $$65 stmt_list.'}'
+ stmt_list: stmt_list.';' stmt
+
+ ';' shift 416
+ '}' shift 620
+ . error
+
+
+state 574
+ for_header: osimple_stmt ';' osimple_stmt ';'.osimple_stmt
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ name goto 69
+ osimple_stmt goto 621
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 575
+ range_stmt: expr_list '=' LRANGE expr. (67)
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 67 (src line 610)
+
+
+state 576
+ range_stmt: expr_list LCOLAS LRANGE expr. (68)
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 68 (src line 617)
+
+
+state 577
+ caseblock_list: caseblock_list.caseblock
+ switch_stmt: LSWITCH $$88 if_header $$89 LBODY caseblock_list.'}'
+
+ LCASE shift 506
+ LDEFAULT shift 507
+ '}' shift 622
+ . error
+
+ case goto 505
+ caseblock goto 503
+
+state 578
+ caseblock: case $$61.stmt_list
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCASE reduce 251 (src line 1719)
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFAULT reduce 251 (src line 1719)
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 303
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ stmt_list goto 623
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 579
+ case: LCASE expr_or_type_list.':'
+ case: LCASE expr_or_type_list.'=' expr ':'
+ case: LCASE expr_or_type_list.LCOLAS expr ':'
+ expr_or_type_list: expr_or_type_list.',' expr_or_type
+
+ LCOLAS shift 626
+ '=' shift 625
+ ':' shift 624
+ ',' shift 627
+ . error
+
+
+state 580
+ case: LDEFAULT ':'. (58)
+
+ . reduce 58 (src line 524)
+
+
+state 581
+ if_stmt: LIF $$78 if_header $$79 loop_body $$80.elseif_list else
+ elseif_list: . (84)
+
+ . reduce 84 (src line 734)
+
+ elseif_list goto 628
+
+state 582
+ pseudocall: pexpr '(' expr_or_type_list LDDD ocomma ')'. (125)
+
+ . reduce 125 (src line 934)
+
+
+state 583
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr ']'. (132)
+
+ . reduce 132 (src line 970)
+
+
+state 584
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr ':'.oexpr ']'
+ oexpr: . (290)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 290 (src line 1899)
+
+ sym goto 123
+ expr goto 188
+ fnliteral goto 73
+ name goto 69
+ oexpr goto 629
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 585
+ keyval_list: keyval_list ',' keyval. (282)
+
+ . reduce 282 (src line 1872)
+
+
+state 586
+ keyval_list: keyval_list ',' bare_complitexpr. (283)
+
+ . reduce 283 (src line 1876)
+
+
+state 587
+ keyval: expr ':' complitexpr. (141)
+
+ . reduce 141 (src line 1020)
+
+
+state 588
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ complitexpr: expr. (144)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 144 (src line 1046)
+
+
+state 589
+ complitexpr: '{'.start_complit braced_keyval_list '}'
+ start_complit: . (140)
+
+ . reduce 140 (src line 1013)
+
+ start_complit goto 630
+
+state 590
+ bare_complitexpr: '{' start_complit braced_keyval_list.'}'
+
+ '}' shift 631
+ . error
+
+
+state 591
+ pexpr_no_paren: '(' expr_or_type ')' '{' start_complit braced_keyval_list.'}'
+
+ '}' shift 632
+ . error
+
+
+state 592
+ ntype: '(' ntype ')'. (171)
+ non_recvchantype: '(' ntype ')'. (180)
+
+ LBODY reduce 180 (src line 1215)
+ '(' reduce 180 (src line 1215)
+ '{' reduce 180 (src line 1215)
+ . reduce 171 (src line 1196)
+
+
+state 593
+ compound_stmt: '{' $$59 stmt_list '}'. (60)
+
+ . reduce 60 (src line 549)
+
+
+state 594
+ structdcl: '(' embed ')' oliteral. (232)
+
+ . reduce 232 (src line 1594)
+
+
+state 595
+ structdcl: '(' '*' embed ')'.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 633
+
+state 596
+ structdcl: '*' '(' embed ')'.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 634
+
+state 597
+ indcl: '(' oarg_type_list_ocomma ')'.fnres
+ fnres: . (212)
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 488
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 212 (src line 1469)
+
+ sym goto 123
+ dotname goto 493
+ name goto 197
+ fnres goto 635
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 492
+ recvchantype goto 489
+ othertype goto 491
+ fnret_type goto 487
+ fntype goto 490
+ hidden_importsym goto 11
+
+state 598
+ hidden_type_misc: '[' LLITERAL ']' hidden_type. (320)
+
+ . reduce 320 (src line 2032)
+
+
+state 599
+ hidden_type_misc: LMAP '[' hidden_type ']'.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 636
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 600
+ hidden_type_misc: LSTRUCT '{' ohidden_structdcl_list '}'. (322)
+
+ . reduce 322 (src line 2040)
+
+
+state 601
+ hidden_structdcl_list: hidden_structdcl_list ';'.hidden_structdcl
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 546
+ hidden_importsym goto 11
+ hidden_structdcl goto 637
+
+state 602
+ hidden_structdcl: sym hidden_type.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 638
+
+state 603
+ hidden_type_misc: LINTERFACE '{' ohidden_interfacedcl_list '}'. (323)
+
+ . reduce 323 (src line 2044)
+
+
+state 604
+ hidden_interfacedcl_list: hidden_interfacedcl_list ';'.hidden_interfacedcl
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 552
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 550
+ hidden_importsym goto 553
+ hidden_interfacedcl goto 639
+ hidden_type goto 551
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 605
+ hidden_interfacedcl: sym '('.ohidden_funarg_list ')' ohidden_funres
+ ohidden_funarg_list: . (296)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 296 (src line 1917)
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 467
+ ohidden_funarg_list goto 640
+
+state 606
+ hidden_type_misc: LCHAN '(' hidden_type_recv_chan ')'. (326)
+
+ . reduce 326 (src line 2058)
+
+
+state 607
+ hidden_type_func: LFUNC '(' ohidden_funarg_list ')'.ohidden_funres
+ ohidden_funres: . (335)
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '(' shift 612
+ '[' shift 342
+ '@' shift 13
+ . reduce 335 (src line 2144)
+
+ hidden_importsym goto 340
+ hidden_funres goto 611
+ ohidden_funres goto 641
+ hidden_type goto 613
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 608
+ hidden_constant: '(' hidden_literal '+'.hidden_literal ')'
+
+ LLITERAL shift 460
+ LNAME shift 10
+ '-' shift 461
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 462
+ hidden_importsym goto 11
+ hidden_literal goto 642
+
+state 609
+ hidden_import: LCONST hidden_pkg_importsym hidden_type '=' hidden_constant ';'. (307)
+
+ . reduce 307 (src line 1957)
+
+
+state 610
+ hidden_fndcl: hidden_pkg_importsym '(' ohidden_funarg_list ')' ohidden_funres. (207)
+
+ . reduce 207 (src line 1405)
+
+
+state 611
+ ohidden_funres: hidden_funres. (336)
+
+ . reduce 336 (src line 2148)
+
+
+state 612
+ hidden_funres: '('.ohidden_funarg_list ')'
+ ohidden_funarg_list: . (296)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 296 (src line 1917)
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 467
+ ohidden_funarg_list goto 643
+
+state 613
+ hidden_funres: hidden_type. (338)
+
+ . reduce 338 (src line 2155)
+
+
+state 614
+ hidden_fndcl: '(' hidden_funarg_list ')' sym '('.ohidden_funarg_list ')' ohidden_funres
+ ohidden_funarg_list: . (296)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 296 (src line 1917)
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 467
+ ohidden_funarg_list goto 644
+
+state 615
+ hidden_funarg: sym LDDD hidden_type oliteral. (331)
+
+ . reduce 331 (src line 2094)
+
+
+state 616
+ common_dcl: lconst '(' constdcl ';' constdcl_list osemi ')'. (33)
+
+ . reduce 33 (src line 330)
+
+
+state 617
+ constdcl_list: constdcl_list ';' constdcl1. (223)
+
+ . reduce 223 (src line 1532)
+
+
+state 618
+ fndcl: '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma.')' fnres
+
+ ')' shift 645
+ . error
+
+
+state 619
+ fnres: '(' oarg_type_list_ocomma ')'. (214)
+
+ . reduce 214 (src line 1478)
+
+
+state 620
+ loop_body: LBODY $$65 stmt_list '}'. (66)
+
+ . reduce 66 (src line 604)
+
+
+state 621
+ for_header: osimple_stmt ';' osimple_stmt ';' osimple_stmt. (70)
+
+ . reduce 70 (src line 630)
+
+
+state 622
+ switch_stmt: LSWITCH $$88 if_header $$89 LBODY caseblock_list '}'. (90)
+
+ . reduce 90 (src line 768)
+
+
+state 623
+ caseblock: case $$61 stmt_list. (62)
+ stmt_list: stmt_list.';' stmt
+
+ ';' shift 416
+ . reduce 62 (src line 571)
+
+
+state 624
+ case: LCASE expr_or_type_list ':'. (55)
+
+ . reduce 55 (src line 473)
+
+
+state 625
+ case: LCASE expr_or_type_list '='.expr ':'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 646
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 626
+ case: LCASE expr_or_type_list LCOLAS.expr ':'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 647
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 627
+ expr_or_type_list: expr_or_type_list ','.expr_or_type
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 511
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 628
+ if_stmt: LIF $$78 if_header $$79 loop_body $$80 elseif_list.else
+ elseif_list: elseif_list.elseif
+ else: . (86)
+
+ LELSE shift 650
+ . reduce 86 (src line 743)
+
+ elseif goto 649
+ else goto 648
+
+state 629
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr ':' oexpr.']'
+
+ ']' shift 651
+ . error
+
+
+state 630
+ complitexpr: '{' start_complit.braced_keyval_list '}'
+ braced_keyval_list: . (284)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 284 (src line 1881)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 401
+ fnliteral goto 73
+ keyval goto 400
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ keyval_list goto 399
+ braced_keyval_list goto 652
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 631
+ bare_complitexpr: '{' start_complit braced_keyval_list '}'. (143)
+
+ . reduce 143 (src line 1040)
+
+
+state 632
+ pexpr_no_paren: '(' expr_or_type ')' '{' start_complit braced_keyval_list '}'. (138)
+
+ . reduce 138 (src line 1004)
+
+
+state 633
+ structdcl: '(' '*' embed ')' oliteral. (234)
+
+ . reduce 234 (src line 1606)
+
+
+state 634
+ structdcl: '*' '(' embed ')' oliteral. (235)
+
+ . reduce 235 (src line 1613)
+
+
+state 635
+ indcl: '(' oarg_type_list_ocomma ')' fnres. (242)
+
+ . reduce 242 (src line 1668)
+
+
+state 636
+ hidden_type_misc: LMAP '[' hidden_type ']' hidden_type. (321)
+
+ . reduce 321 (src line 2036)
+
+
+state 637
+ hidden_structdcl_list: hidden_structdcl_list ';' hidden_structdcl. (349)
+
+ . reduce 349 (src line 2227)
+
+
+state 638
+ hidden_structdcl: sym hidden_type oliteral. (332)
+
+ . reduce 332 (src line 2110)
+
+
+state 639
+ hidden_interfacedcl_list: hidden_interfacedcl_list ';' hidden_interfacedcl. (351)
+
+ . reduce 351 (src line 2237)
+
+
+state 640
+ hidden_interfacedcl: sym '(' ohidden_funarg_list.')' ohidden_funres
+
+ ')' shift 653
+ . error
+
+
+state 641
+ hidden_type_func: LFUNC '(' ohidden_funarg_list ')' ohidden_funres. (329)
+
+ . reduce 329 (src line 2079)
+
+
+state 642
+ hidden_constant: '(' hidden_literal '+' hidden_literal.')'
+
+ ')' shift 654
+ . error
+
+
+state 643
+ hidden_funres: '(' ohidden_funarg_list.')'
+
+ ')' shift 655
+ . error
+
+
+state 644
+ hidden_fndcl: '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list.')' ohidden_funres
+
+ ')' shift 656
+ . error
+
+
+state 645
+ fndcl: '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')'.fnres
+ fnres: . (212)
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 488
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 212 (src line 1469)
+
+ sym goto 123
+ dotname goto 493
+ name goto 197
+ fnres goto 657
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 492
+ recvchantype goto 489
+ othertype goto 491
+ fnret_type goto 487
+ fntype goto 490
+ hidden_importsym goto 11
+
+state 646
+ case: LCASE expr_or_type_list '=' expr.':'
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ ':' shift 658
+ . error
+
+
+state 647
+ case: LCASE expr_or_type_list LCOLAS expr.':'
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ ':' shift 659
+ . error
+
+
+state 648
+ if_stmt: LIF $$78 if_header $$79 loop_body $$80 elseif_list else. (81)
+
+ . reduce 81 (src line 703)
+
+
+state 649
+ elseif_list: elseif_list elseif. (85)
+
+ . reduce 85 (src line 738)
+
+
+state 650
+ elseif: LELSE.LIF $$82 if_header loop_body
+ else: LELSE.compound_stmt
+
+ LIF shift 660
+ '{' shift 308
+ . error
+
+ compound_stmt goto 661
+
+state 651
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr ':' oexpr ']'. (133)
+
+ . reduce 133 (src line 974)
+
+
+state 652
+ complitexpr: '{' start_complit braced_keyval_list.'}'
+
+ '}' shift 662
+ . error
+
+
+state 653
+ hidden_interfacedcl: sym '(' ohidden_funarg_list ')'.ohidden_funres
+ ohidden_funres: . (335)
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '(' shift 612
+ '[' shift 342
+ '@' shift 13
+ . reduce 335 (src line 2144)
+
+ hidden_importsym goto 340
+ hidden_funres goto 611
+ ohidden_funres goto 663
+ hidden_type goto 613
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 654
+ hidden_constant: '(' hidden_literal '+' hidden_literal ')'. (343)
+
+ . reduce 343 (src line 2197)
+
+
+state 655
+ hidden_funres: '(' ohidden_funarg_list ')'. (337)
+
+ . reduce 337 (src line 2150)
+
+
+state 656
+ hidden_fndcl: '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list ')'.ohidden_funres
+ ohidden_funres: . (335)
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '(' shift 612
+ '[' shift 342
+ '@' shift 13
+ . reduce 335 (src line 2144)
+
+ hidden_importsym goto 340
+ hidden_funres goto 611
+ ohidden_funres goto 664
+ hidden_type goto 613
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 657
+ fndcl: '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')' fnres. (206)
+
+ . reduce 206 (src line 1368)
+
+
+state 658
+ case: LCASE expr_or_type_list '=' expr ':'. (56)
+
+ . reduce 56 (src line 497)
+
+
+state 659
+ case: LCASE expr_or_type_list LCOLAS expr ':'. (57)
+
+ . reduce 57 (src line 515)
+
+
+state 660
+ elseif: LELSE LIF.$$82 if_header loop_body
+ $$82: . (82)
+
+ . reduce 82 (src line 720)
+
+ $$82 goto 665
+
+state 661
+ else: LELSE compound_stmt. (87)
+
+ . reduce 87 (src line 747)
+
+
+state 662
+ complitexpr: '{' start_complit braced_keyval_list '}'. (145)
+
+ . reduce 145 (src line 1048)
+
+
+state 663
+ hidden_interfacedcl: sym '(' ohidden_funarg_list ')' ohidden_funres. (333)
+
+ . reduce 333 (src line 2134)
+
+
+state 664
+ hidden_fndcl: '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list ')' ohidden_funres. (208)
+
+ . reduce 208 (src line 1431)
+
+
+state 665
+ elseif: LELSE LIF $$82.if_header loop_body
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ if_header goto 666
+ name goto 69
+ osimple_stmt goto 286
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 666
+ elseif: LELSE LIF $$82 if_header.loop_body
+
+ LBODY shift 382
+ . error
+
+ loop_body goto 667
+
+state 667
+ elseif: LELSE LIF $$82 if_header loop_body. (83)
+
+ . reduce 83 (src line 725)
+
+
+76 terminals, 142 nonterminals
+352 grammar rules, 668/2000 states
+0 shift/reduce, 0 reduce/reduce conflicts reported
+191 working sets used
+memory: parser 3749/30000
+446 extra closures
+3093 shift entries, 64 exceptions
+603 goto entries
+1650 entries saved by goto default
+Optimizer space used: output 2282/30000
+2282 table entries, 722 zero
+maximum spread: 76, maximum offset: 666
diff --git a/src/cmd/internal/obj/ar.go b/src/cmd/internal/obj/ar.go
new file mode 100644
index 0000000000..3991e730b0
--- /dev/null
+++ b/src/cmd/internal/obj/ar.go
@@ -0,0 +1,45 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+// Inferno utils/include/ar.h
+// http://code.google.com/p/inferno-os/source/browse/utils/include/ar.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+type ar_hdr struct {
+ Name string
+ Date string
+ Uid string
+ Gid string
+ Mode string
+ Size string
+ Fmag string
+}
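
The ar_hdr struct is a direct conversion of the C header cited above: the classic Unix archive member header, seven fixed-width, space-padded ASCII fields (name 16 bytes, date 12, uid 6, gid 6, mode 8, size 10, magic 2; 60 bytes in all). A minimal sketch of decoding one such record under that layout follows; the arHdr type and decodeArHdr helper are hypothetical names for illustration, not part of this commit.

package main

import (
	"fmt"
	"strings"
)

// arHdr mirrors ar_hdr above: one string per fixed-width header field.
type arHdr struct {
	Name, Date, Uid, Gid, Mode, Size, Fmag string
}

// decodeArHdr splits a raw 60-byte archive member header into its
// seven space-padded fields (widths 16, 12, 6, 6, 8, 10, 2).
func decodeArHdr(raw []byte) (arHdr, error) {
	if len(raw) != 60 {
		return arHdr{}, fmt.Errorf("ar header is 60 bytes, got %d", len(raw))
	}
	f := func(lo, hi int) string { return strings.TrimRight(string(raw[lo:hi]), " ") }
	return arHdr{
		Name: f(0, 16), Date: f(16, 28), Uid: f(28, 34), Gid: f(34, 40),
		Mode: f(40, 48), Size: f(48, 58), Fmag: f(58, 60),
	}, nil
}

func main() {
	// Build a synthetic 60-byte header; "`\n" is the standard ar magic.
	raw := fmt.Sprintf("%-16s%-12s%-6s%-6s%-8s%-10s`\n",
		"align.o", "1423872000", "0", "0", "100644", "735")
	h, err := decodeArHdr([]byte(raw))
	fmt.Printf("%+v %v\n", h, err)
}
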
diff --git a/src/cmd/internal/obj/arm/5.out.go b/src/cmd/internal/obj/arm/5.out.go
index d62b99e2eb..11c9fb92f2 100644
--- a/src/cmd/internal/obj/arm/5.out.go
+++ b/src/cmd/internal/obj/arm/5.out.go
@@ -87,6 +87,7 @@ const (
REGEXT = REG_R10
REGG = REGEXT - 0
REGM = REGEXT - 1
+ REGCTXT = REG_R7
REGTMP = REG_R11
REGSP = REG_R13
REGLINK = REG_R14
diff --git a/src/cmd/internal/obj/arm/anames5.go b/src/cmd/internal/obj/arm/anames5.go
index 5e08bb93aa..f00cf17760 100644
--- a/src/cmd/internal/obj/arm/anames5.go
+++ b/src/cmd/internal/obj/arm/anames5.go
@@ -1,7 +1,7 @@
package arm
var Anames = []string{
- "XXX ",
+ "XXX",
"CALL",
"CHECKNIL",
"DATA",
diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go
index 2e11ae3c11..aff9a17214 100644
--- a/src/cmd/internal/obj/arm/asm5.go
+++ b/src/cmd/internal/obj/arm/asm5.go
@@ -87,6 +87,8 @@ var optab = []Optab{
Optab{ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
Optab{ABX, C_NONE, C_NONE, C_SBRA, 74, 20, 0, 0, 0},
Optab{ABEQ, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
+ Optab{ABEQ, C_RCON, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // prediction hinted form, hint ignored
+
Optab{AB, C_NONE, C_NONE, C_ROREG, 6, 4, 0, LPOOL, 0},
Optab{ABL, C_NONE, C_NONE, C_ROREG, 7, 4, 0, 0, 0},
Optab{ABL, C_REG, C_NONE, C_ROREG, 7, 4, 0, 0, 0},
@@ -272,12 +274,6 @@ var xcmp [C_GOK + 1][C_GOK + 1]uint8
var deferreturn *obj.LSym
-func nocache(p *obj.Prog) {
- p.Optab = 0
- p.From.Class = 0
- p.To.Class = 0
-}
-
/* size of a case statement including jump table */
func casesz(ctxt *obj.Link, p *obj.Prog) int32 {
var jt int = 0
@@ -341,7 +337,6 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
case APLD:
out[0] = 0xe1a01001 // (MOVW R1, R1)
- break
}
}
@@ -452,8 +447,8 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
} else {
a2 = &q.From
}
- nocache(q)
- nocache(p)
+ obj.Nocache(q)
+ obj.Nocache(p)
// insert q after p
q.Link = p.Link
@@ -526,8 +521,8 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
} else {
a2 = &q.From
}
- nocache(q)
- nocache(p)
+ obj.Nocache(q)
+ obj.Nocache(p)
// insert q after p
q.Link = p.Link
@@ -557,7 +552,6 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
} else if out != nil {
asmout(ctxt, p, o, out)
}
- break
}
// destination register specific
@@ -574,7 +568,6 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
p.Pc += 4
}
size += 4
- break
}
}
@@ -667,7 +660,6 @@ func span5(ctxt *obj.Link, cursym *obj.LSym) {
if p.Scond&C_SCOND == C_SCOND_NONE {
flushpool(ctxt, p, 0, 0)
}
- break
}
if p.As == AMOVW && p.To.Type == obj.TYPE_REG && p.To.Reg == REGPC && p.Scond&C_SCOND == C_SCOND_NONE {
@@ -936,7 +928,6 @@ func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
C_LACON:
t.To.Type = obj.TYPE_CONST
t.To.Offset = ctxt.Instoffset
- break
}
if t.Pcrel == nil {
@@ -1719,8 +1710,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
// runtime.tlsg is special.
// Its "address" is the offset from the TLS thread pointer
// to the thread-local g and m pointers.
- // Emit a TLS relocation instead of a standard one.
- if rel.Sym == ctxt.Tlsg {
+ // Emit a TLS relocation instead of a standard one if its
+ // type is not explicitly set by the runtime. This assumes
+ // that any reference to runtime.tlsg is accompanied by its
+ // type declaration when necessary.
+ if rel.Sym == ctxt.Tlsg && ctxt.Tlsg.Type == 0 {
rel.Type = obj.R_TLS
if ctxt.Flag_shared != 0 {
rel.Add += ctxt.Pc - p.Pcrel.Pc - 8 - int64(rel.Siz)
@@ -1932,7 +1926,6 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= uint32(p.To.Offset & 0xffff)
o1 |= (uint32(p.From.Reg) & 15) << 16
aclass(ctxt, &p.From)
- break
}
if ctxt.Instoffset != 0 {
@@ -2473,7 +2466,6 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if p.As == ADATABUNDLE {
o1 = 0xe125be70
}
- break
}
out[0] = o1
diff --git a/src/cmd/internal/obj/arm/list5.go b/src/cmd/internal/obj/arm/list5.go
index 9986d14931..6bd1ddf13c 100644
--- a/src/cmd/internal/obj/arm/list5.go
+++ b/src/cmd/internal/obj/arm/list5.go
@@ -189,7 +189,7 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
str = fmt.Sprintf("$%.17g", a.U.Dval)
case obj.TYPE_SCONST:
- str = fmt.Sprintf("$\"%q\"", a.U.Sval)
+ str = fmt.Sprintf("$%q", a.U.Sval)
case obj.TYPE_REGREG:
str = fmt.Sprintf("(%v, %v)", Rconv(int(a.Reg)), Rconv(int(a.Offset)))
@@ -318,7 +318,6 @@ func Mconv(a *obj.Addr) string {
case obj.NAME_PARAM:
str = fmt.Sprintf("%s+%d(FP)", s.Name, int(a.Offset))
- break
}
out:
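
The Dconv change above removes doubled quoting: the %q verb already wraps its argument in quotes, so the old format string printed the string constant with two pairs of quotes. A quick self-contained check of both forms:

package main

import "fmt"

func main() {
	sval := "hi\x00"
	fmt.Printf("$\"%q\"\n", sval) // old form prints $""hi\x00"" (doubled quotes)
	fmt.Printf("$%q\n", sval)     // new form prints $"hi\x00"
}
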
diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go
index 292b232f7a..406939e058 100644
--- a/src/cmd/internal/obj/arm/obj5.go
+++ b/src/cmd/internal/obj/arm/obj5.go
@@ -38,10 +38,11 @@ import (
"math"
)
+var progedit_tlsfallback *obj.LSym
+
func progedit(ctxt *obj.Link, p *obj.Prog) {
var literal string
var s *obj.LSym
- var tlsfallback *obj.LSym
p.From.Class = 0
p.To.Class = 0
@@ -55,7 +56,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
p.To.Type = obj.TYPE_BRANCH
}
- break
}
	// Replace TLS register fetches on older ARM processors.
@@ -71,8 +71,8 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
if ctxt.Goarm < 7 {
// Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension.
- if tlsfallback == nil {
- tlsfallback = obj.Linklookup(ctxt, "runtime.read_tls_fallback", 0)
+ if progedit_tlsfallback == nil {
+ progedit_tlsfallback = obj.Linklookup(ctxt, "runtime.read_tls_fallback", 0)
}
// MOVW LR, R11
@@ -88,7 +88,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
p.As = ABL
p.To.Type = obj.TYPE_BRANCH
- p.To.Sym = tlsfallback
+ p.To.Sym = progedit_tlsfallback
p.To.Offset = 0
// MOVW R11, LR
@@ -105,8 +105,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
// Otherwise, MRC/MCR instructions need no further treatment.
p.As = AWORD
-
- break
}
// Rewrite float constants to values stored in memory.
@@ -148,8 +146,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
p.From.Name = obj.NAME_EXTERN
p.From.Offset = 0
}
-
- break
}
if ctxt.Flag_shared != 0 {
@@ -191,12 +187,6 @@ func linkcase(casep *obj.Prog) {
}
}
-func nocache5(p *obj.Prog) {
- p.Optab = 0
- p.From.Class = 0
- p.To.Class = 0
-}
-
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
var p *obj.Prog
var pl *obj.Prog
@@ -363,8 +353,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p.Pcond = q1
}
}
-
- break
}
q = p
@@ -503,7 +491,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
}
case obj.ARET:
- nocache5(p)
+ obj.Nocache(p)
if cursym.Text.Mark&LEAF != 0 {
if !(autosize != 0) {
p.As = AB
@@ -609,7 +597,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
case AMODU:
p.To.Sym = ctxt.Sym_modu
- break
}
/* MOV REGTMP, b */
@@ -671,7 +658,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
if p.From.Type == obj.TYPE_ADDR && p.From.Reg == REGSP && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
p.Spadj = int32(-p.From.Offset)
}
- break
}
}
}
@@ -1076,6 +1062,8 @@ loop:
}
var Linkarm = obj.LinkArch{
+ Dconv: Dconv,
+ Rconv: Rconv,
ByteOrder: binary.LittleEndian,
Pconv: Pconv,
Name: "arm",
diff --git a/src/cmd/internal/obj/data.go b/src/cmd/internal/obj/data.go
index 66995a3cd7..265e609bbe 100644
--- a/src/cmd/internal/obj/data.go
+++ b/src/cmd/internal/obj/data.go
@@ -142,7 +142,7 @@ func Setuintxx(ctxt *Link, s *LSym, off int64, v uint64, wid int64) int64 {
return off + wid
}
-func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
+func Adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
var off int64
off = s.Size
@@ -150,23 +150,23 @@ func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
return off
}
-func adduint8(ctxt *Link, s *LSym, v uint8) int64 {
- return adduintxx(ctxt, s, uint64(v), 1)
+func Adduint8(ctxt *Link, s *LSym, v uint8) int64 {
+ return Adduintxx(ctxt, s, uint64(v), 1)
}
-func adduint16(ctxt *Link, s *LSym, v uint16) int64 {
- return adduintxx(ctxt, s, uint64(v), 2)
+func Adduint16(ctxt *Link, s *LSym, v uint16) int64 {
+ return Adduintxx(ctxt, s, uint64(v), 2)
}
func Adduint32(ctxt *Link, s *LSym, v uint32) int64 {
- return adduintxx(ctxt, s, uint64(v), 4)
+ return Adduintxx(ctxt, s, uint64(v), 4)
}
func Adduint64(ctxt *Link, s *LSym, v uint64) int64 {
- return adduintxx(ctxt, s, v, 8)
+ return Adduintxx(ctxt, s, v, 8)
}
-func setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
+func Setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
return Setuintxx(ctxt, s, r, uint64(v), 1)
}
@@ -174,7 +174,7 @@ func setuint16(ctxt *Link, s *LSym, r int64, v uint16) int64 {
return Setuintxx(ctxt, s, r, uint64(v), 2)
}
-func setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
+func Setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
return Setuintxx(ctxt, s, r, uint64(v), 4)
}
@@ -182,7 +182,7 @@ func setuint64(ctxt *Link, s *LSym, r int64, v uint64) int64 {
return Setuintxx(ctxt, s, r, v, 8)
}
-func addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+func Addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
var i int64
var r *Reloc
@@ -222,11 +222,11 @@ func addpcrelplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
return i + int64(r.Siz)
}
-func addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
- return addaddrplus(ctxt, s, t, 0)
+func Addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
+ return Addaddrplus(ctxt, s, t, 0)
}
-func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
+func Setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
var r *Reloc
if s.Type == 0 {
@@ -247,11 +247,11 @@ func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
return off + int64(r.Siz)
}
-func setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
- return setaddrplus(ctxt, s, off, t, 0)
+func Setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
+ return Setaddrplus(ctxt, s, off, t, 0)
}
-func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
+func Addsize(ctxt *Link, s *LSym, t *LSym) int64 {
var i int64
var r *Reloc
@@ -270,7 +270,7 @@ func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
return i + int64(r.Siz)
}
-func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+func Addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
var i int64
var r *Reloc
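The data.go hunk is a pure rename pass: the adduint/setuint/addaddr family becomes exported so the new Go tools can call it across packages. All of the Adduint* helpers funnel into Adduintxx, which appends an integer of the given byte width at the symbol's current size and returns the offset where it landed. A minimal standalone sketch of that append-and-return-offset pattern (a plain byte slice stands in for an LSym, and the byte order is hard-coded little-endian here, where the real code goes through the arch's ByteOrder):

package main

import (
    "encoding/binary"
    "fmt"
)

// adduintxx mimics obj.Adduintxx: append v in wid bytes at the end
// of data and return the offset where it was written.
func adduintxx(data *[]byte, v uint64, wid int) int64 {
    off := int64(len(*data))
    var buf [8]byte
    binary.LittleEndian.PutUint64(buf[:], v) // assumption: little-endian target
    *data = append(*data, buf[:wid]...)
    return off
}

func main() {
    var sym []byte
    fmt.Println(adduintxx(&sym, 0x11, 1))       // 0
    fmt.Println(adduintxx(&sym, 0x2233, 2))     // 1
    fmt.Println(adduintxx(&sym, 0x44556677, 4)) // 3
    fmt.Printf("%% x: % x\n", sym)              // 11 33 22 77 66 55 44
}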
diff --git a/src/cmd/internal/obj/flag.go b/src/cmd/internal/obj/flag.go
new file mode 100644
index 0000000000..37594372f8
--- /dev/null
+++ b/src/cmd/internal/obj/flag.go
@@ -0,0 +1,120 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "strconv"
+)
+
+func Flagfn2(string, string, func(string, string)) { panic("flag") }
+
+func Flagcount(name, usage string, val *int) {
+ flag.Var((*count)(val), name, usage)
+}
+
+func Flagint32(name, usage string, val *int32) {
+ flag.Var((*int32Value)(val), name, usage)
+}
+
+func Flagint64(name, usage string, val *int64) {
+ flag.Int64Var(val, name, *val, usage)
+}
+
+func Flagstr(name, usage string, val *string) {
+ flag.StringVar(val, name, *val, usage)
+}
+
+func Flagfn0(name, usage string, f func()) {
+ flag.Var(fn0(f), name, usage)
+}
+
+func Flagfn1(name, usage string, f func(string)) {
+ flag.Var(fn1(f), name, usage)
+}
+
+func Flagprint(fd int) {
+ if fd == 1 {
+ flag.CommandLine.SetOutput(os.Stdout)
+ }
+ flag.PrintDefaults()
+}
+
+func Flagparse(usage func()) {
+ flag.Usage = usage
+ flag.Parse()
+}
+
+// count is a flag.Value that is like a flag.Bool and a flag.Int.
+// If used as -name, it increments the count, but -name=x sets the count.
+// Used for verbose flag -v.
+type count int
+
+func (c *count) String() string {
+ return fmt.Sprint(int(*c))
+}
+
+func (c *count) Set(s string) error {
+ switch s {
+ case "true":
+ *c++
+ case "false":
+ *c = 0
+ default:
+ n, err := strconv.Atoi(s)
+ if err != nil {
+ return fmt.Errorf("invalid count %q", s)
+ }
+ *c = count(n)
+ }
+ return nil
+}
+
+func (c *count) IsBoolFlag() bool {
+ return true
+}
+
+type int32Value int32
+
+func newIntValue(val int32, p *int32) *int32Value {
+ *p = val
+ return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int32Value(v)
+ return err
+}
+
+func (i *int32Value) Get() interface{} { return int32(*i) }
+
+func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) }
+
+type fn0 func()
+
+func (f fn0) Set(s string) error {
+ f()
+ return nil
+}
+
+func (f fn0) Get() interface{} { return nil }
+
+func (f fn0) String() string { return "" }
+
+func (f fn0) IsBoolFlag() bool {
+ return true
+}
+
+type fn1 func(string)
+
+func (f fn1) Set(s string) error {
+ f(s)
+ return nil
+}
+
+func (f fn1) String() string { return "" }
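flag.go is new in cmd/internal/obj and wraps the standard flag package behind the old C-flavored entry points (Flagcount, Flagint32, Flagfn0, and so on). The count type is the interesting part: because IsBoolFlag reports true, a bare -name reaches Set as "true" and increments the counter, while -name=x parses x and sets it outright, which is exactly the -v behavior the tools want. A runnable sketch with a local copy of the type (count itself is unexported):

package main

import (
    "flag"
    "fmt"
    "strconv"
)

type count int

func (c *count) String() string { return fmt.Sprint(int(*c)) }

func (c *count) Set(s string) error {
    switch s {
    case "true": // a bare -v arrives as "true" because IsBoolFlag is set
        *c++
    case "false":
        *c = 0
    default:
        n, err := strconv.Atoi(s)
        if err != nil {
            return fmt.Errorf("invalid count %q", s)
        }
        *c = count(n)
    }
    return nil
}

func (c *count) IsBoolFlag() bool { return true }

func main() {
    var v count
    fs := flag.NewFlagSet("demo", flag.ContinueOnError)
    fs.Var(&v, "v", "verbosity")
    fs.Parse([]string{"-v", "-v", "-v=5"})
    fmt.Println(v) // 5: two bare increments, then an explicit set
}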
diff --git a/src/cmd/internal/obj/fmt.go b/src/cmd/internal/obj/fmt.go
index 6bb5c42722..b1936ef1ae 100644
--- a/src/cmd/internal/obj/fmt.go
+++ b/src/cmd/internal/obj/fmt.go
@@ -14,28 +14,6 @@
package obj
-// (The comments in this file were copied from the manpage files rune.3,
-// isalpharune.3, and runestrcat.3. Some formatting changes were also made
-// to conform to Google style. /JRM 11/11/05)
-
-type Fmt struct {
- runes uint8
- start interface{}
- to interface{}
- stop interface{}
- flush func(*Fmt) int
- farg interface{}
- nfmt int
- args []interface{}
- r uint
- width int
- prec int
- flags uint32
- decimal string
- thousands string
- grouping string
-}
-
const (
FmtWidth = 1
FmtLeft = FmtWidth << 1
@@ -54,7 +32,3 @@ const (
FmtLDouble = FmtByte << 1
FmtFlag = FmtLDouble << 1
)
-
-var fmtdoquote func(int) int
-
-/* Edit .+1,/^$/ | cfn $PLAN9/src/lib9/fmt/?*.c | grep -v static |grep -v __ */
diff --git a/src/cmd/internal/obj/go.go b/src/cmd/internal/obj/go.go
index 005730d9a3..3c176342ca 100644
--- a/src/cmd/internal/obj/go.go
+++ b/src/cmd/internal/obj/go.go
@@ -15,7 +15,7 @@ import (
var Framepointer_enabled int
-var fieldtrack_enabled int
+var Fieldtrack_enabled int
var Zprog Prog
@@ -30,7 +30,7 @@ var exper = []struct {
struct {
name string
val *int
- }{"fieldtrack", &fieldtrack_enabled},
+ }{"fieldtrack", &Fieldtrack_enabled},
struct {
name string
val *int
@@ -61,7 +61,79 @@ func linksetexp() {
}
}
-func expstring() string {
+// replace all "". with pkg.
+func Expandpkg(t0 string, pkg string) string {
+ return strings.Replace(t0, `"".`, pkg+".", -1)
+}
+
+func double2ieee(ieee *uint64, f float64) {
+ *ieee = math.Float64bits(f)
+}
+
+func Nopout(p *Prog) {
+ p.As = ANOP
+ p.Scond = Zprog.Scond
+ p.From = Zprog.From
+ p.From3 = Zprog.From3
+ p.Reg = Zprog.Reg
+ p.To = Zprog.To
+}
+
+func Nocache(p *Prog) {
+ p.Optab = 0
+ p.From.Class = 0
+ p.From3.Class = 0
+ p.To.Class = 0
+}
+
+/*
+ * bv.c
+ */
+
+/*
+ * closure.c
+ */
+
+/*
+ * const.c
+ */
+
+/*
+ * cplx.c
+ */
+
+/*
+ * dcl.c
+ */
+
+/*
+ * esc.c
+ */
+
+/*
+ * export.c
+ */
+
+/*
+ * fmt.c
+ */
+
+/*
+ * gen.c
+ */
+
+/*
+ * init.c
+ */
+
+/*
+ * inl.c
+ */
+
+/*
+ * lex.c
+ */
+func Expstring() string {
buf := "X"
for i := range exper {
if *exper[i].val != 0 {
@@ -73,12 +145,3 @@ func expstring() string {
}
return "X:" + buf[2:]
}
-
-// replace all "". with pkg.
-func expandpkg(t0 string, pkg string) string {
- return strings.Replace(t0, `"".`, pkg+".", -1)
-}
-
-func double2ieee(ieee *uint64, f float64) {
- *ieee = math.Float64bits(f)
-}
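Expandpkg (formerly expandpkg) is the local-package expansion used when reading and writing object files: the compiler emits the local package path as the empty string "", and consumers substitute the real import path. It is a one-line strings.Replace, easy to sanity-check in isolation:

package main

import (
    "fmt"
    "strings"
)

// Same one-liner as obj.Expandpkg: replace every `"".` with pkg+".".
func expandpkg(t0, pkg string) string {
    return strings.Replace(t0, `"".`, pkg+".", -1)
}

func main() {
    fmt.Println(expandpkg(`type."".Reader`, "bufio")) // type.bufio.Reader
}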
diff --git a/src/cmd/internal/obj/i386/8.out.go b/src/cmd/internal/obj/i386/8.out.go
index 72463471bb..d6c01cd5ff 100644
--- a/src/cmd/internal/obj/i386/8.out.go
+++ b/src/cmd/internal/obj/i386/8.out.go
@@ -590,4 +590,5 @@ const (
FREGRET = REG_F0
REGSP = REG_SP
REGTMP = REG_DI
+ REGCTXT = REG_DX
)
diff --git a/src/cmd/internal/obj/i386/anames8.go b/src/cmd/internal/obj/i386/anames8.go
index c0f6263ff7..ebf2e5e65a 100644
--- a/src/cmd/internal/obj/i386/anames8.go
+++ b/src/cmd/internal/obj/i386/anames8.go
@@ -4,7 +4,7 @@ package i386
* this is the ranlib header
*/
var Anames = []string{
- "XXX ",
+ "XXX",
"CALL",
"CHECKNIL",
"DATA",
@@ -23,7 +23,7 @@ var Anames = []string{
"USEFIELD",
"VARDEF",
"VARKILL",
- "AAA ",
+ "AAA",
"AAD",
"AAM",
"AAS",
diff --git a/src/cmd/internal/obj/i386/asm8.go b/src/cmd/internal/obj/i386/asm8.go
index 6ef1c3a43b..b176d50af5 100644
--- a/src/cmd/internal/obj/i386/asm8.go
+++ b/src/cmd/internal/obj/i386/asm8.go
@@ -2237,7 +2237,6 @@ func asmidx(ctxt *obj.Link, scale int, index int, base int) {
REG_SI,
REG_DI:
i = reg[index] << 3
- break
}
switch scale {
@@ -2255,7 +2254,6 @@ func asmidx(ctxt *obj.Link, scale int, index int, base int) {
case 8:
i |= 3 << 6
- break
}
bas:
@@ -2275,7 +2273,6 @@ bas:
REG_SI,
REG_DI:
i |= reg[base]
- break
}
ctxt.Andptr[0] = byte(i)
@@ -2404,7 +2401,6 @@ func asmand(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int) {
case obj.NAME_AUTO,
obj.NAME_PARAM:
base = REG_SP
- break
}
if base == REG_NONE {
@@ -2446,7 +2442,6 @@ func asmand(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int) {
case obj.NAME_AUTO,
obj.NAME_PARAM:
base = REG_SP
- break
}
if base == REG_TLS {
@@ -3166,7 +3161,6 @@ func byteswapreg(ctxt *obj.Link, a *obj.Addr) int {
REG_DL,
REG_DH:
cand = 0
- break
}
}
@@ -3183,7 +3177,6 @@ func byteswapreg(ctxt *obj.Link, a *obj.Addr) int {
case REG_DX:
cand = 0
- break
}
}
@@ -3259,7 +3252,6 @@ func mediaop(ctxt *obj.Link, o *Optab, op int, osize int, z int) int {
ctxt.Andptr[0] = Pm
ctxt.Andptr = ctxt.Andptr[1:]
}
- break
}
ctxt.Andptr[0] = byte(op)
@@ -3900,7 +3892,6 @@ mfound:
ctxt.Andptr = ctxt.Andptr[1:]
ctxt.Andptr[0] = 0xb5
ctxt.Andptr = ctxt.Andptr[1:]
- break
}
asmand(ctxt, p, &p.From, reg[p.To.Reg])
@@ -3931,10 +3922,7 @@ mfound:
ctxt.Andptr[0] = t[5]
ctxt.Andptr = ctxt.Andptr[1:]
asmand(ctxt, p, &p.To, reg[p.From.Index])
- break
}
-
- break
}
case 7: /* imul rm,r */
@@ -4004,10 +3992,7 @@ mfound:
ctxt.Andptr[0] = 0x8B
ctxt.Andptr = ctxt.Andptr[1:]
asmand(ctxt, p, &pp.From, reg[p.To.Reg])
- break
}
-
- break
}
}
diff --git a/src/cmd/internal/obj/i386/list8.go b/src/cmd/internal/obj/i386/list8.go
index 33dfea44ad..dcc76f7454 100644
--- a/src/cmd/internal/obj/i386/list8.go
+++ b/src/cmd/internal/obj/i386/list8.go
@@ -59,13 +59,13 @@ func Pconv(p *obj.Prog) string {
default:
str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+
// TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as
// SHRQ $32(DX*0), AX
// Remove.
if (p.From.Type == obj.TYPE_REG || p.From.Type == obj.TYPE_CONST) && p.From.Index != 0 {
- str += fmt.Sprintf(":%s", Rconv(int(p.From.Index)))
+ str += fmt.Sprintf(":%v", Rconv(int(p.From.Index)))
}
- break
}
fp += str
@@ -145,7 +145,6 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
} else {
str = fmt.Sprintf("%d(FP)", a.Offset)
}
- break
}
if a.Index != REG_NONE {
@@ -167,13 +166,12 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
str = fmt.Sprintf("$(%.17g)", a.U.Dval)
case obj.TYPE_SCONST:
- str = fmt.Sprintf("$\"%q\"", a.U.Sval)
+ str = fmt.Sprintf("$%q", a.U.Sval)
case obj.TYPE_ADDR:
a.Type = obj.TYPE_MEM
str = fmt.Sprintf("$%v", Dconv(p, 0, a))
a.Type = obj.TYPE_ADDR
- break
}
fp += str
diff --git a/src/cmd/internal/obj/i386/obj8.go b/src/cmd/internal/obj/i386/obj8.go
index 69385a1ef4..f833a9af52 100644
--- a/src/cmd/internal/obj/i386/obj8.go
+++ b/src/cmd/internal/obj/i386/obj8.go
@@ -124,7 +124,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
p.To.Type = obj.TYPE_BRANCH
}
- break
}
// Rewrite float constants to values stored in memory.
@@ -224,8 +223,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
p.From.Sym = s
p.From.Offset = 0
}
-
- break
}
}
@@ -912,6 +909,8 @@ loop:
}
var Link386 = obj.LinkArch{
+ Dconv: Dconv,
+ Rconv: Rconv,
ByteOrder: binary.LittleEndian,
Pconv: Pconv,
Name: "386",
diff --git a/src/cmd/internal/obj/i386/util.go b/src/cmd/internal/obj/i386/util.go
index 9f86766100..b3e964361f 100644
--- a/src/cmd/internal/obj/i386/util.go
+++ b/src/cmd/internal/obj/i386/util.go
@@ -4,10 +4,6 @@
package i386
-const (
- fmtLong = 1 << iota
-)
-
func bool2int(b bool) int {
if b {
return 1
diff --git a/src/cmd/internal/obj/ld.go b/src/cmd/internal/obj/ld.go
index f28f6877bf..a53c8676e6 100644
--- a/src/cmd/internal/obj/ld.go
+++ b/src/cmd/internal/obj/ld.go
@@ -72,7 +72,7 @@ func addlib(ctxt *Link, src, obj, pathname string) {
if ctxt.Debugvlog > 1 && ctxt.Bso != nil {
fmt.Fprintf(ctxt.Bso, "%5.2f addlib: %s %s pulls in %s\n", Cputime(), obj, src, pname)
}
- addlibpath(ctxt, src, obj, pname, name)
+ Addlibpath(ctxt, src, obj, pname, name)
}
/*
@@ -82,7 +82,7 @@ func addlib(ctxt *Link, src, obj, pathname string) {
* file: object file, e.g., /home/rsc/go/pkg/container/vector.a
* pkg: package import path, e.g. container/vector
*/
-func addlibpath(ctxt *Link, srcref, objref, file, pkg string) {
+func Addlibpath(ctxt *Link, srcref, objref, file, pkg string) {
for _, lib := range ctxt.Library {
if lib.File == file {
return
diff --git a/src/cmd/internal/obj/libc.go b/src/cmd/internal/obj/libc.go
new file mode 100644
index 0000000000..204839e8af
--- /dev/null
+++ b/src/cmd/internal/obj/libc.go
@@ -0,0 +1,20 @@
+package obj
+
+const (
+ AEXIST = 0
+)
+
+var GOEXPERIMENT string
+
+const (
+ OREAD = iota
+ OWRITE
+ ORDWR
+ SIGBUS
+ SIGSEGV
+ NDFLT
+ FPPDBL
+ FPRNR
+ HEADER_IO
+ BOM = 0xFEFF
+)
diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index 308b6b63d5..d256fbbb18 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -241,6 +241,8 @@ type Plist struct {
type LinkArch struct {
Pconv func(*Prog) string
+ Dconv func(*Prog, int, *Addr) string
+ Rconv func(int) string
ByteOrder binary.ByteOrder
Name string
Thechar int
@@ -278,14 +280,14 @@ type Pcdata struct {
}
type Pciter struct {
- d Pcdata
- p []byte
- pc uint32
- nextpc uint32
- pcscale uint32
- value int32
- start int
- done int
+ D Pcdata
+ P []byte
+ Pc uint32
+ Nextpc uint32
+ Pcscale uint32
+ Value int32
+ Start int
+ Done int
}
// An Addr is an argument to an instruction.
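The two new LinkArch fields follow the same pattern as the existing Pconv: architecture-specific formatting is reached through function values on the arch table, so code in obj can print operands and registers without importing the arm/i386/x86/ppc64 packages. The Dconv and Rconv methods added to Link near the end of this CL dispatch through them. A reduced sketch of that dispatch, with a hypothetical formatter standing in for arm.Rconv:

package main

import "fmt"

// Reduced shapes of obj.LinkArch and obj.Link; only the fields
// needed to show the dispatch are kept.
type LinkArch struct {
    Name  string
    Rconv func(reg int) string // arch-specific register printer
}

type Link struct{ Arch *LinkArch }

// Mirrors the (*Link).Rconv method added in util.go below.
func (ctxt *Link) Rconv(reg int) string { return ctxt.Arch.Rconv(reg) }

// Hypothetical stand-in for the real arm.Rconv.
var linkarm = LinkArch{
    Name:  "arm",
    Rconv: func(reg int) string { return fmt.Sprintf("R%d", reg) },
}

func main() {
    ctxt := &Link{Arch: &linkarm}
    fmt.Println(ctxt.Rconv(11)) // R11
}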
diff --git a/src/cmd/internal/obj/mgc0.go b/src/cmd/internal/obj/mgc0.go
new file mode 100644
index 0000000000..7dfd991f34
--- /dev/null
+++ b/src/cmd/internal/obj/mgc0.go
@@ -0,0 +1,37 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+// Garbage collector liveness bitmap generation.
+
+// The command line flag -live causes this code to print debug information.
+// The levels are:
+//
+// -live (aka -live=1): print liveness lists as code warnings at safe points
+// -live=2: print an assembly listing with liveness annotations
+// -live=3: print information during each computation phase (much chattier)
+//
+// Each level includes the earlier output as well.
+
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Used by cmd/gc.
+
+const (
+ GcBits = 4
+ BitsPerPointer = 2
+ BitsDead = 0
+ BitsScalar = 1
+ BitsPointer = 2
+ BitsMask = 3
+ PointersPerByte = 8 / BitsPerPointer
+ InsData = 1 + iota - 7
+ InsArray
+ InsArrayEnd
+ InsEnd
+ MaxGCMask = 65536
+)
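mgc0.go carries the GC liveness constants over for cmd/gc's use: each word of a frame is described by a 2-bit code (BitsDead, BitsScalar, BitsPointer), packed PointersPerByte = 4 codes to a byte. A sketch of pulling codes back out of a packed bitmap; the low-bits-first packing order here is an assumption for illustration, not a statement about the runtime's exact layout:

package main

import "fmt"

const (
    BitsPerPointer  = 2
    BitsDead        = 0
    BitsScalar      = 1
    BitsPointer     = 2
    BitsMask        = 3
    PointersPerByte = 8 / BitsPerPointer // 4
)

// bitsFor extracts the 2-bit liveness code for word i from a packed bitmap.
func bitsFor(bitmap []byte, i int) int {
    b := bitmap[i/PointersPerByte]
    shift := uint(i%PointersPerByte) * BitsPerPointer
    return int(b>>shift) & BitsMask
}

func main() {
    // One byte encoding four words: dead, scalar, pointer, scalar
    // (lowest bits describe the lowest-numbered word in this sketch).
    bm := []byte{BitsDead | BitsScalar<<2 | BitsPointer<<4 | BitsScalar<<6}
    for i := 0; i < 4; i++ {
        fmt.Println(i, bitsFor(bm, i)) // 0 0, 1 1, 2 2, 3 1
    }
}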
diff --git a/src/cmd/internal/obj/pcln.go b/src/cmd/internal/obj/pcln.go
index d31ec2cf86..746ca2dd8d 100644
--- a/src/cmd/internal/obj/pcln.go
+++ b/src/cmd/internal/obj/pcln.go
@@ -316,45 +316,45 @@ func getvarint(pp *[]byte) uint32 {
return v
}
-func pciternext(it *Pciter) {
+func Pciternext(it *Pciter) {
var v uint32
var dv int32
- it.pc = it.nextpc
- if it.done != 0 {
+ it.Pc = it.Nextpc
+ if it.Done != 0 {
return
}
- if -cap(it.p) >= -cap(it.d.P[len(it.d.P):]) {
- it.done = 1
+ if -cap(it.P) >= -cap(it.D.P[len(it.D.P):]) {
+ it.Done = 1
return
}
// value delta
- v = getvarint(&it.p)
+ v = getvarint(&it.P)
- if v == 0 && !(it.start != 0) {
- it.done = 1
+ if v == 0 && !(it.Start != 0) {
+ it.Done = 1
return
}
- it.start = 0
+ it.Start = 0
dv = int32(v>>1) ^ (int32(v<<31) >> 31)
- it.value += dv
+ it.Value += dv
// pc delta
- v = getvarint(&it.p)
+ v = getvarint(&it.P)
- it.nextpc = it.pc + v*it.pcscale
+ it.Nextpc = it.Pc + v*it.Pcscale
}
-func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
- it.d = *d
- it.p = it.d.P
- it.pc = 0
- it.nextpc = 0
- it.value = -1
- it.start = 1
- it.done = 0
- it.pcscale = uint32(ctxt.Arch.Minlc)
- pciternext(it)
+func Pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
+ it.D = *d
+ it.P = it.D.P
+ it.Pc = 0
+ it.Nextpc = 0
+ it.Value = -1
+ it.Start = 1
+ it.Done = 0
+ it.Pcscale = uint32(ctxt.Arch.Minlc)
+ Pciternext(it)
}
diff --git a/src/cmd/internal/obj/ppc64/9.out.go b/src/cmd/internal/obj/ppc64/9.out.go
index 2eb4b0c89c..bcc99ba59e 100644
--- a/src/cmd/internal/obj/ppc64/9.out.go
+++ b/src/cmd/internal/obj/ppc64/9.out.go
@@ -134,7 +134,7 @@ const (
REGRT1 = REG_R3
REGRT2 = REG_R4
REGMIN = REG_R7
- REGENV = REG_R11
+ REGCTXT = REG_R11
REGTLS = REG_R13
REGMAX = REG_R27
REGEXT = REG_R30
diff --git a/src/cmd/internal/obj/ppc64/anames9.go b/src/cmd/internal/obj/ppc64/anames9.go
index a49e14a9ee..9d755dedbc 100644
--- a/src/cmd/internal/obj/ppc64/anames9.go
+++ b/src/cmd/internal/obj/ppc64/anames9.go
@@ -4,7 +4,7 @@ package ppc64
* this is the ranlib header
*/
var Anames = []string{
- "XXX ",
+ "XXX",
"CALL",
"CHECKNIL",
"DATA",
@@ -23,7 +23,7 @@ var Anames = []string{
"USEFIELD",
"VARDEF",
"VARKILL",
- "ADD ",
+ "ADD",
"ADDCC",
"ADDV",
"ADDVCC",
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
index 9f6b7f3924..5f6a4d7efd 100644
--- a/src/cmd/internal/obj/ppc64/asm9.go
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -2443,7 +2443,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
//if(dlm) reloc(&p->from, p->pc, 1);
- break
+
}
out[0] = o1
diff --git a/src/cmd/internal/obj/ppc64/list9.go b/src/cmd/internal/obj/ppc64/list9.go
index c7d892b345..8bd94839d4 100644
--- a/src/cmd/internal/obj/ppc64/list9.go
+++ b/src/cmd/internal/obj/ppc64/list9.go
@@ -181,8 +181,7 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
str = fmt.Sprintf("$%.17g", a.U.Dval)
case obj.TYPE_SCONST:
- str = fmt.Sprintf("$\"%q\"", a.U.Sval)
- break
+ str = fmt.Sprintf("$%q", a.U.Sval)
}
fp += str
@@ -241,7 +240,6 @@ func Mconv(a *obj.Addr) string {
} else {
str = fmt.Sprintf("%s+%d(FP)", s.Name, a.Offset)
}
- break
}
//out:
diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go
index 2040089705..2e0ec874fb 100644
--- a/src/cmd/internal/obj/ppc64/obj9.go
+++ b/src/cmd/internal/obj/ppc64/obj9.go
@@ -53,7 +53,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
if p.To.Sym != nil {
p.To.Type = obj.TYPE_BRANCH
}
- break
}
// Rewrite float constants to values stored in memory.
@@ -118,8 +117,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
p.From.Offset = -p.From.Offset
p.As = AADD
}
-
- break
}
}
@@ -596,7 +593,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
p.Spadj = int32(-p.From.Offset)
}
- break
}
}
}
@@ -969,6 +965,8 @@ loop:
}
var Linkppc64 = obj.LinkArch{
+ Dconv: Dconv,
+ Rconv: Rconv,
ByteOrder: binary.BigEndian,
Pconv: Pconv,
Name: "ppc64",
@@ -984,6 +982,8 @@ var Linkppc64 = obj.LinkArch{
}
var Linkppc64le = obj.LinkArch{
+ Dconv: Dconv,
+ Rconv: Rconv,
ByteOrder: binary.LittleEndian,
Pconv: Pconv,
Name: "ppc64le",
diff --git a/src/cmd/internal/obj/ppc64/util.go b/src/cmd/internal/obj/ppc64/util.go
index 25be412555..0df4af78d8 100644
--- a/src/cmd/internal/obj/ppc64/util.go
+++ b/src/cmd/internal/obj/ppc64/util.go
@@ -4,10 +4,6 @@
package ppc64
-const (
- fmtLong = 1 << iota
-)
-
func bool2int(b bool) int {
if b {
return 1
diff --git a/src/cmd/internal/obj/stack.go b/src/cmd/internal/obj/stack.go
index 4b8c0b463d..b8d0350d89 100644
--- a/src/cmd/internal/obj/stack.go
+++ b/src/cmd/internal/obj/stack.go
@@ -40,7 +40,8 @@ package obj
// TODO(rsc): Share Go definitions with linkers directly.
const (
- StackSystem = 0
+ STACKSYSTEM = 0
+ StackSystem = STACKSYSTEM
StackBig = 4096
StackGuard = 640 + StackSystem
StackSmall = 128
diff --git a/src/cmd/internal/obj/sym.go b/src/cmd/internal/obj/sym.go
index 583b3256f6..e1ba5de023 100644
--- a/src/cmd/internal/obj/sym.go
+++ b/src/cmd/internal/obj/sym.go
@@ -100,7 +100,7 @@ var headers = []struct {
}{"windowsgui", Hwindows},
}
-func headtype(name string) int {
+func Headtype(name string) int {
var i int
for i = 0; i < len(headers); i++ {
@@ -111,8 +111,9 @@ func headtype(name string) int {
return -1
}
+var headstr_buf string
+
func Headstr(v int) string {
- var buf string
var i int
for i = 0; i < len(headers); i++ {
@@ -120,8 +121,8 @@ func Headstr(v int) string {
return headers[i].name
}
}
- buf = fmt.Sprintf("%d", v)
- return buf
+ headstr_buf = fmt.Sprintf("%d", v)
+ return headstr_buf
}
func Linknew(arch *LinkArch) *Link {
@@ -145,7 +146,7 @@ func Linknew(arch *LinkArch) *Link {
ctxt.Pathname = buf
- ctxt.Headtype = headtype(Getgoos())
+ ctxt.Headtype = Headtype(Getgoos())
if ctxt.Headtype < 0 {
log.Fatalf("unknown goos %s", Getgoos())
}
@@ -178,15 +179,14 @@ func Linknew(arch *LinkArch) *Link {
default:
log.Fatalf("unknown thread-local storage offset for nacl/%s", ctxt.Arch.Name)
+ case '5':
+ ctxt.Tlsoffset = 0
+
case '6':
ctxt.Tlsoffset = 0
case '8':
ctxt.Tlsoffset = -8
-
- case '5':
- ctxt.Tlsoffset = 0
- break
}
/*
@@ -203,10 +203,10 @@ func Linknew(arch *LinkArch) *Link {
case '8':
ctxt.Tlsoffset = 0x468
- break
- }
- break
+ case '5':
+ ctxt.Tlsoffset = 0 // dummy value, not needed
+ }
}
// On arm, record goarm.
@@ -222,7 +222,7 @@ func Linknew(arch *LinkArch) *Link {
return ctxt
}
-func linknewsym(ctxt *Link, symb string, v int) *LSym {
+func Linknewsym(ctxt *Link, symb string, v int) *LSym {
var s *LSym
s = new(LSym)
@@ -265,7 +265,7 @@ func _lookup(ctxt *Link, symb string, v int, creat int) *LSym {
return nil
}
- s = linknewsym(ctxt, symb, v)
+ s = Linknewsym(ctxt, symb, v)
s.Extname = s.Name
s.Hash = ctxt.Hash[h]
ctxt.Hash[h] = s
@@ -278,6 +278,13 @@ func Linklookup(ctxt *Link, name string, v int) *LSym {
}
// read-only lookup
-func linkrlookup(ctxt *Link, name string, v int) *LSym {
+func Linkrlookup(ctxt *Link, name string, v int) *LSym {
return _lookup(ctxt, name, v, 0)
}
+
+func Linksymfmt(s *LSym) string {
+ if s == nil {
+ return "<nil>"
+ }
+ return s.Name
+}
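sym.go exports the Headtype/Headstr pair, which translate between an OS header name and its numeric Htype; Headstr falls back to printing the bare number, now through the package-level headstr_buf (a c2go stand-in for the old C static buffer). A reduced round-trip sketch over a local table; the numeric values below are made up for illustration, where the real table uses constants like Hwindows:

package main

import "fmt"

var headers = []struct {
    name string
    val  int
}{
    {"darwin", 1}, // hypothetical values, not the real Htype constants
    {"linux", 2},
    {"windows", 3},
}

func headtype(name string) int {
    for _, h := range headers {
        if h.name == name {
            return h.val
        }
    }
    return -1
}

func headstr(v int) string {
    for _, h := range headers {
        if h.val == v {
            return h.name
        }
    }
    return fmt.Sprintf("%d", v) // unknown: print the raw value
}

func main() {
    fmt.Println(headtype("linux")) // 2
    fmt.Println(headstr(2))        // linux
    fmt.Println(headstr(99))       // 99
}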
diff --git a/src/cmd/internal/obj/typekind.go b/src/cmd/internal/obj/typekind.go
new file mode 100644
index 0000000000..f8e302bd32
--- /dev/null
+++ b/src/cmd/internal/obj/typekind.go
@@ -0,0 +1,45 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Must match runtime and reflect.
+// Included by cmd/gc.
+
+const (
+ KindBool = 1 + iota
+ KindInt
+ KindInt8
+ KindInt16
+ KindInt32
+ KindInt64
+ KindUint
+ KindUint8
+ KindUint16
+ KindUint32
+ KindUint64
+ KindUintptr
+ KindFloat32
+ KindFloat64
+ KindComplex64
+ KindComplex128
+ KindArray
+ KindChan
+ KindFunc
+ KindInterface
+ KindMap
+ KindPtr
+ KindSlice
+ KindString
+ KindStruct
+ KindUnsafePointer
+ KindDirectIface = 1 << 5
+ KindGCProg = 1 << 6
+ KindNoPointers = 1 << 7
+ KindMask = (1 << 5) - 1
+)
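typekind.go brings over the reflect kind encoding: the low five bits are the kind proper and the top three bits are flags, so a single byte answers both "what is it" and "how is it stored". Extracting either side is a mask or a bit test:

package main

import "fmt"

const (
    KindPtr         = 22 // the value it receives from the iota list above
    KindDirectIface = 1 << 5
    KindGCProg      = 1 << 6
    KindNoPointers  = 1 << 7
    KindMask        = (1 << 5) - 1
)

func main() {
    k := uint8(KindPtr | KindDirectIface)
    fmt.Println(k&KindMask == KindPtr)  // true: low 5 bits are the kind
    fmt.Println(k&KindDirectIface != 0) // true: flag bit set
    fmt.Println(k&KindNoPointers != 0)  // false
}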
diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go
index de7197e0ba..60708fa558 100644
--- a/src/cmd/internal/obj/util.go
+++ b/src/cmd/internal/obj/util.go
@@ -8,6 +8,7 @@ import (
"bufio"
"fmt"
"io"
+ "log"
"os"
"strconv"
"time"
@@ -23,21 +24,30 @@ func Cputime() float64 {
}
type Biobuf struct {
- unget int
- haveUnget bool
- f *os.File
- r *bufio.Reader
- w *bufio.Writer
+ unget [2]int
+ numUnget int
+ f *os.File
+ r *bufio.Reader
+ w *bufio.Writer
+ linelen int
}
func Bopenw(name string) (*Biobuf, error) {
- f, err := os.Open(name)
+ f, err := os.Create(name)
if err != nil {
return nil, err
}
return &Biobuf{f: f, w: bufio.NewWriter(f)}, nil
}
+func Bopenr(name string) (*Biobuf, error) {
+ f, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &Biobuf{f: f, r: bufio.NewReader(f)}, nil
+}
+
func Binitw(w io.Writer) *Biobuf {
return &Biobuf{w: bufio.NewWriter(w)}
}
@@ -46,6 +56,41 @@ func (b *Biobuf) Write(p []byte) (int, error) {
return b.w.Write(p)
}
+func Bwritestring(b *Biobuf, p string) (int, error) {
+ return b.w.WriteString(p)
+}
+
+func Bseek(b *Biobuf, offset int64, whence int) int64 {
+ if b.w != nil {
+ if err := b.w.Flush(); err != nil {
+ log.Fatal("writing output: %v", err)
+ }
+ } else if b.r != nil {
+ if whence == 1 {
+ offset -= int64(b.r.Buffered())
+ }
+ }
+ off, err := b.f.Seek(offset, whence)
+ if err != nil {
+ log.Fatal("seeking in output: %v", err)
+ }
+ if b.r != nil {
+ b.r.Reset(b.f)
+ }
+ return off
+}
+
+func Boffset(b *Biobuf) int64 {
+ if err := b.w.Flush(); err != nil {
+ log.Fatal("writing output: %v", err)
+ }
+ off, err := b.f.Seek(0, 1)
+ if err != nil {
+ log.Fatal("seeking in output: %v", err)
+ }
+ return off
+}
+
func (b *Biobuf) Flush() error {
return b.w.Flush()
}
@@ -58,26 +103,86 @@ func Bputc(b *Biobuf, c byte) {
b.w.WriteByte(c)
}
+const Beof = -1
+
+func Bread(b *Biobuf, p []byte) int {
+ n, err := io.ReadFull(b.r, p)
+ if n == 0 {
+ if err != nil && err != io.EOF {
+ n = -1
+ }
+ }
+ return n
+}
+
func Bgetc(b *Biobuf) int {
- if b.haveUnget {
- b.haveUnget = false
- return int(b.unget)
+ if b.numUnget > 0 {
+ b.numUnget--
+ return int(b.unget[b.numUnget])
}
c, err := b.r.ReadByte()
+ r := int(c)
+ if err != nil {
+ r = -1
+ }
+ b.unget[1] = b.unget[0]
+ b.unget[0] = r
+ return r
+}
+
+func Bgetrune(b *Biobuf) int {
+ r, _, err := b.r.ReadRune()
if err != nil {
- b.unget = -1
return -1
}
- b.unget = int(c)
- return int(c)
+ return int(r)
}
-func Bungetc(b *Biobuf) {
- b.haveUnget = true
+func Bungetrune(b *Biobuf) {
+ b.r.UnreadRune()
}
-func Boffset(b *Biobuf) int64 {
- panic("Boffset")
+func (b *Biobuf) Read(p []byte) (int, error) {
+ return b.r.Read(p)
+}
+
+func Brdline(b *Biobuf, delim int) string {
+ s, err := b.r.ReadBytes(byte(delim))
+ if err != nil {
+ log.Fatalf("reading input: %v", err)
+ }
+ b.linelen = len(s)
+ return string(s)
+}
+
+func Brdstr(b *Biobuf, delim int, cut int) string {
+ s, err := b.r.ReadString(byte(delim))
+ if err != nil {
+ log.Fatalf("reading input: %v", err)
+ }
+ if len(s) > 0 && cut > 0 {
+ s = s[:len(s)-1]
+ }
+ return s
+}
+
+func Access(name string, mode int) int {
+ if mode != 0 {
+ panic("bad access")
+ }
+ _, err := os.Stat(name)
+ if err != nil {
+ return -1
+ }
+ return 0
+}
+
+func Blinelen(b *Biobuf) int {
+ return b.linelen
+}
+
+func Bungetc(b *Biobuf) {
+ b.numUnget++
}
func Bflush(b *Biobuf) error {
@@ -85,7 +190,10 @@ func Bflush(b *Biobuf) error {
}
func Bterm(b *Biobuf) error {
- err := b.w.Flush()
+ var err error
+ if b.w != nil {
+ err = b.w.Flush()
+ }
err1 := b.f.Close()
if err == nil {
err = err1
@@ -116,6 +224,10 @@ func Getgoarm() string {
return envOr("GOARM", defaultGOARM)
}
+func Getgo386() string {
+ return envOr("GO386", defaultGO386)
+}
+
func Getgoversion() string {
return version
}
@@ -145,3 +257,15 @@ func (ctxt *Link) NewProg() *Prog {
func (ctxt *Link) Line(n int) string {
return Linklinefmt(ctxt, n, false, false)
}
+
+func (ctxt *Link) Dconv(a *Addr) string {
+ return ctxt.Arch.Dconv(nil, 0, a)
+}
+
+func (ctxt *Link) Rconv(reg int) string {
+ return ctxt.Arch.Rconv(reg)
+}
+
+func Getcallerpc(interface{}) uintptr {
+ return 1
+}
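The Biobuf changes replace the single-byte unget with a two-deep one: Bgetc records its last two results in the unget array, and Bungetc just bumps numUnget, presumably for callers that need two characters of pushback. A standalone copy of that mechanism shows the LIFO replay:

package main

import (
    "bufio"
    "fmt"
    "strings"
)

// Minimal copy of the new Biobuf unget scheme.
type biobuf struct {
    unget    [2]int
    numUnget int
    r        *bufio.Reader
}

func (b *biobuf) getc() int {
    if b.numUnget > 0 {
        b.numUnget--
        return b.unget[b.numUnget]
    }
    c, err := b.r.ReadByte()
    r := int(c)
    if err != nil {
        r = -1 // Beof
    }
    b.unget[1] = b.unget[0] // remember the last two results
    b.unget[0] = r
    return r
}

func (b *biobuf) ungetc() { b.numUnget++ }

func main() {
    b := &biobuf{r: bufio.NewReader(strings.NewReader("ab"))}
    fmt.Println(b.getc(), b.getc()) // 97 98
    b.ungetc()
    b.ungetc()
    fmt.Println(b.getc(), b.getc()) // 97 98 again, replayed from unget
}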
diff --git a/src/cmd/internal/obj/x86/6.out.go b/src/cmd/internal/obj/x86/6.out.go
index f96597b066..9a6cef3227 100644
--- a/src/cmd/internal/obj/x86/6.out.go
+++ b/src/cmd/internal/obj/x86/6.out.go
@@ -789,6 +789,7 @@ const (
FREGRET = REG_X0
REGSP = REG_SP
REGTMP = REG_DI
+ REGCTXT = REG_DX
REGEXT = REG_R15
FREGMIN = REG_X0 + 5
FREGEXT = REG_X0 + 15
diff --git a/src/cmd/internal/obj/x86/anames6.go b/src/cmd/internal/obj/x86/anames6.go
index c7cc409e5f..fca730bf9d 100644
--- a/src/cmd/internal/obj/x86/anames6.go
+++ b/src/cmd/internal/obj/x86/anames6.go
@@ -4,7 +4,7 @@ package x86
* this is the ranlib header
*/
var Anames = []string{
- "XXX ",
+ "XXX",
"CALL",
"CHECKNIL",
"DATA",
@@ -23,7 +23,7 @@ var Anames = []string{
"USEFIELD",
"VARDEF",
"VARKILL",
- "AAA ",
+ "AAA",
"AAD",
"AAM",
"AAS",
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
index 689e6414c2..1d86d46cee 100644
--- a/src/cmd/internal/obj/x86/asm6.go
+++ b/src/cmd/internal/obj/x86/asm6.go
@@ -2864,7 +2864,6 @@ func asmidx(ctxt *obj.Link, scale int, index int, base int) {
REG_SI,
REG_DI:
i = reg[index] << 3
- break
}
switch scale {
@@ -2882,7 +2881,6 @@ func asmidx(ctxt *obj.Link, scale int, index int, base int) {
case 8:
i |= 3 << 6
- break
}
bas:
@@ -2915,7 +2913,6 @@ bas:
REG_SI,
REG_DI:
i |= reg[base]
- break
}
ctxt.Andptr[0] = byte(i)
@@ -3086,7 +3083,6 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int)
case obj.NAME_AUTO,
obj.NAME_PARAM:
base = REG_SP
- break
}
ctxt.Rexflag |= regrex[int(a.Index)]&Rxx | regrex[base]&Rxb | rex
@@ -3132,7 +3128,6 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int)
case obj.NAME_AUTO,
obj.NAME_PARAM:
base = REG_SP
- break
}
if base == REG_TLS {
@@ -3443,7 +3438,6 @@ func mediaop(ctxt *obj.Link, o *Optab, op int, osize int, z int) int {
ctxt.Andptr[0] = Pm
ctxt.Andptr = ctxt.Andptr[1:]
}
- break
}
ctxt.Andptr[0] = byte(op)
@@ -3569,7 +3563,6 @@ found:
if p.Mode != 64 {
ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p)
}
- break
}
if z >= len(o.op) {
@@ -4075,8 +4068,6 @@ found:
}
}
}
-
- break
}
return
@@ -4231,7 +4222,6 @@ mfound:
ctxt.Andptr = ctxt.Andptr[1:]
ctxt.Andptr[0] = 0xb5
ctxt.Andptr = ctxt.Andptr[1:]
- break
}
asmand(ctxt, p, &p.From, &p.To)
@@ -4274,7 +4264,6 @@ mfound:
ctxt.Andptr[0] = t[1]
ctxt.Andptr = ctxt.Andptr[1:]
asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
- break
}
}
@@ -4335,10 +4324,7 @@ mfound:
ctxt.Andptr[0] = 0x8B
ctxt.Andptr = ctxt.Andptr[1:]
asmand(ctxt, p, &pp.From, &p.To)
- break
}
-
- break
}
}
@@ -4512,7 +4498,6 @@ func asmins(ctxt *obj.Link, p *obj.Prog) {
AMOVSQ:
copy(ctxt.Andptr, naclmovs)
ctxt.Andptr = ctxt.Andptr[len(naclmovs):]
- break
}
if ctxt.Rep != 0 {
@@ -4585,7 +4570,6 @@ func asmins(ctxt *obj.Link, p *obj.Prog) {
case REG_BP:
copy(ctxt.Andptr, naclbpfix)
ctxt.Andptr = ctxt.Andptr[len(naclbpfix):]
- break
}
}
}
diff --git a/src/cmd/internal/obj/x86/list6.go b/src/cmd/internal/obj/x86/list6.go
index af10f38ef0..e50de9508b 100644
--- a/src/cmd/internal/obj/x86/list6.go
+++ b/src/cmd/internal/obj/x86/list6.go
@@ -71,13 +71,13 @@ func Pconv(p *obj.Prog) string {
default:
str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+
// TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as
// SHRQ $32(DX*0), AX
// Remove.
- if (p.From.Type == obj.TYPE_REG || p.From.Type == obj.TYPE_CONST) && p.From.Index != 0 {
- str += fmt.Sprintf(":%s", Rconv(int(p.From.Index)))
+ if (p.From.Type == obj.TYPE_REG || p.From.Type == obj.TYPE_CONST) && p.From.Index != REG_NONE {
+ str += fmt.Sprintf(":%v", Rconv(int(p.From.Index)))
}
- break
}
fp += str
@@ -157,7 +157,6 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
} else {
str = fmt.Sprintf("%d(FP)", a.Offset)
}
- break
}
if a.Index != REG_NONE {
@@ -179,13 +178,12 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
str = fmt.Sprintf("$(%.17g)", a.U.Dval)
case obj.TYPE_SCONST:
- str = fmt.Sprintf("$\"%q\"", a.U.Sval)
+ str = fmt.Sprintf("$%q", a.U.Sval)
case obj.TYPE_ADDR:
a.Type = obj.TYPE_MEM
str = fmt.Sprintf("$%v", Dconv(p, 0, a))
a.Type = obj.TYPE_ADDR
- break
}
fp += str
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index 82960d746f..c1ad6b7a42 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -38,16 +38,6 @@ import (
"math"
)
-func nopout(p *obj.Prog) {
- p.As = obj.ANOP
- p.From.Type = obj.TYPE_NONE
- p.From.Reg = 0
- p.From.Name = 0
- p.To.Type = obj.TYPE_NONE
- p.To.Reg = 0
- p.To.Name = 0
-}
-
func canuselocaltls(ctxt *obj.Link) int {
switch ctxt.Headtype {
case obj.Hplan9,
@@ -109,7 +99,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
// guarantee we are producing byte-identical binaries as before this code.
// But it should be unnecessary.
if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 && ctxt.Headtype != obj.Hsolaris {
- nopout(p)
+ obj.Nopout(p)
}
if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_R15 {
p.From.Reg = REG_TLS
@@ -175,12 +165,10 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
32,
64:
ctxt.Mode = int(p.From.Offset)
- break
}
}
- nopout(p)
- break
+ obj.Nopout(p)
}
// Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH.
@@ -191,7 +179,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
p.To.Type = obj.TYPE_BRANCH
}
- break
}
// Rewrite float constants to values stored in memory.
@@ -290,8 +277,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
p.From.Sym = s
p.From.Offset = 0
}
-
- break
}
}
@@ -325,7 +310,6 @@ func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
a.Scale = 1
}
a.Reg = REG_R15
- break
}
}
}
@@ -1063,7 +1047,6 @@ loop:
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
q.Pcond = p
- q.Ctxt = p.Ctxt
p = q
}
@@ -1128,6 +1111,8 @@ loop:
}
var Linkamd64 = obj.LinkArch{
+ Dconv: Dconv,
+ Rconv: Rconv,
ByteOrder: binary.LittleEndian,
Pconv: Pconv,
Name: "amd64",
@@ -1143,6 +1128,8 @@ var Linkamd64 = obj.LinkArch{
}
var Linkamd64p32 = obj.LinkArch{
+ Dconv: Dconv,
+ Rconv: Rconv,
ByteOrder: binary.LittleEndian,
Pconv: Pconv,
Name: "amd64p32",
diff --git a/src/cmd/internal/obj/x86/util.go b/src/cmd/internal/obj/x86/util.go
index 4736fabc00..dceedf85ea 100644
--- a/src/cmd/internal/obj/x86/util.go
+++ b/src/cmd/internal/obj/x86/util.go
@@ -4,10 +4,6 @@
package x86
-const (
- fmtLong = 1 << iota
-)
-
func bool2int(b bool) int {
if b {
return 1
diff --git a/src/cmd/new5g/cgen.go b/src/cmd/new5g/cgen.go
new file mode 100644
index 0000000000..f1a42c1ea1
--- /dev/null
+++ b/src/cmd/new5g/cgen.go
@@ -0,0 +1,2004 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+/*
+ * peep.c
+ */
+/*
+ * generate:
+ * res = n;
+ * simplifies and calls gmove.
+ */
+func cgen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var f0 gc.Node
+ var f1 gc.Node
+ var a int
+ var w int
+ var rg int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var addr obj.Addr
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\ncgen-n", n)
+ gc.Dump("cgen-res", res)
+ }
+
+ if n == nil || n.Type == nil {
+ goto ret
+ }
+
+ if res == nil || res.Type == nil {
+ gc.Fatal("cgen: res nil")
+ }
+
+ switch n.Op {
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ if res.Op != gc.ONAME || !(res.Addable != 0) {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_slice(n, res)
+ }
+ return
+
+ case gc.OEFACE:
+ if res.Op != gc.ONAME || !(res.Addable != 0) {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_eface(n, res)
+ }
+ return
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ if n.Ullman >= gc.UINF {
+ if n.Op == gc.OINDREG {
+ gc.Fatal("cgen: this is going to misscompile")
+ }
+ if res.Ullman >= gc.UINF {
+ gc.Tempname(&n1, n.Type)
+ cgen(n, &n1)
+ cgen(&n1, res)
+ goto ret
+ }
+ }
+
+ if gc.Isfat(n.Type) != 0 {
+ if n.Type.Width < 0 {
+ gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
+ }
+ sgen(n, res, n.Type.Width)
+ goto ret
+ }
+
+ // update addressability for string, slice
+ // can't do in walk because n->left->addable
+ // changes if n->left is an escaping local variable.
+ switch n.Op {
+ case gc.OSPTR,
+ gc.OLEN:
+ if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OCAP:
+ if gc.Isslice(n.Left.Type) != 0 {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OITAB:
+ n.Addable = n.Left.Addable
+ }
+
+ // if both are addressable, move
+ if n.Addable != 0 && res.Addable != 0 {
+ if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] != 0 || gc.Iscomplex[res.Type.Etype] != 0 {
+ gmove(n, res)
+ } else {
+ regalloc(&n1, n.Type, nil)
+ gmove(n, &n1)
+ cgen(&n1, res)
+ regfree(&n1)
+ }
+
+ goto ret
+ }
+
+ // if both are not addressable, use a temporary.
+ if !(n.Addable != 0) && !(res.Addable != 0) {
+ // could use regalloc here sometimes,
+ // but have to check for ullman >= UINF.
+ gc.Tempname(&n1, n.Type)
+
+ cgen(n, &n1)
+ cgen(&n1, res)
+ return
+ }
+
+ // if result is not addressable directly but n is,
+ // compute its address and then store via the address.
+ if !(res.Addable != 0) {
+ igen(res, &n1, nil)
+ cgen(n, &n1)
+ regfree(&n1)
+ return
+ }
+
+ if gc.Complexop(n, res) != 0 {
+ gc.Complexgen(n, res)
+ return
+ }
+
+ // if n is sudoaddable generate addr and move
+ if !(gc.Is64(n.Type) != 0) && !(gc.Is64(res.Type) != 0) && !(gc.Iscomplex[n.Type.Etype] != 0) && !(gc.Iscomplex[res.Type.Etype] != 0) {
+ a = optoas(gc.OAS, n.Type)
+ if sudoaddable(a, n, &addr, &w) != 0 {
+ if res.Op != gc.OREGISTER {
+ regalloc(&n2, res.Type, nil)
+ p1 = gins(a, nil, &n2)
+ p1.From = addr
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ gmove(&n2, res)
+ regfree(&n2)
+ } else {
+ p1 = gins(a, nil, res)
+ p1.From = addr
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ }
+
+ sudoclean()
+ goto ret
+ }
+ }
+
+ // otherwise, the result is addressable but n is not.
+ // let's do some computation.
+
+ nl = n.Left
+
+ nr = n.Right
+
+ if nl != nil && nl.Ullman >= gc.UINF {
+ if nr != nil && nr.Ullman >= gc.UINF {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ n2 = *n
+ n2.Left = &n1
+ cgen(&n2, res)
+ goto ret
+ }
+ }
+
+ // 64-bit ops are hard on 32-bit machine.
+ if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Left != nil && gc.Is64(n.Left.Type) != 0 {
+ switch n.Op {
+ // math goes to cgen64.
+ case gc.OMINUS,
+ gc.OCOM,
+ gc.OADD,
+ gc.OSUB,
+ gc.OMUL,
+ gc.OLROT,
+ gc.OLSH,
+ gc.ORSH,
+ gc.OAND,
+ gc.OOR,
+ gc.OXOR:
+ cgen64(n, res)
+
+ return
+ }
+ }
+
+ if nl != nil && gc.Isfloat[n.Type.Etype] != 0 && gc.Isfloat[nl.Type.Etype] != 0 {
+ goto flt
+ }
+ switch n.Op {
+ default:
+ gc.Dump("cgen", n)
+ gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ case gc.OREAL,
+ gc.OIMAG,
+ gc.OCOMPLEX:
+ gc.Fatal("unexpected complex")
+
+ // these call bgen to get a bool value
+ case gc.OOROR,
+ gc.OANDAND,
+ gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OLE,
+ gc.OGE,
+ gc.OGT,
+ gc.ONOT:
+ p1 = gc.Gbranch(arm.AB, nil, 0)
+
+ p2 = gc.Pc
+ gmove(gc.Nodbool(1), res)
+ p3 = gc.Gbranch(arm.AB, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n, true, 0, p2)
+ gmove(gc.Nodbool(0), res)
+ gc.Patch(p3, gc.Pc)
+ goto ret
+
+ case gc.OPLUS:
+ cgen(nl, res)
+ goto ret
+
+ // unary
+ case gc.OCOM:
+ a = optoas(gc.OXOR, nl.Type)
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ gc.Nodconst(&n2, nl.Type, -1)
+ gins(a, &n2, &n1)
+ goto norm
+
+ case gc.OMINUS:
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ gc.Nodconst(&n2, nl.Type, 0)
+ gins(optoas(gc.OMINUS, nl.Type), &n2, &n1)
+ goto norm
+
+ // symmetric binary
+ case gc.OAND,
+ gc.OOR,
+ gc.OXOR,
+ gc.OADD,
+ gc.OMUL:
+ a = optoas(int(n.Op), nl.Type)
+
+ goto sbop
+
+ // asymmetric binary
+ case gc.OSUB:
+ a = optoas(int(n.Op), nl.Type)
+
+ goto abop
+
+ case gc.OHMUL:
+ cgen_hmul(nl, nr, res)
+
+ case gc.OLROT,
+ gc.OLSH,
+ gc.ORSH:
+ cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+
+ case gc.OCONV:
+ if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
+ cgen(nl, res)
+ break
+ }
+
+ if nl.Addable != 0 && !(gc.Is64(nl.Type) != 0) {
+ regalloc(&n1, nl.Type, res)
+ gmove(nl, &n1)
+ } else {
+ if n.Type.Width > int64(gc.Widthptr) || gc.Is64(nl.Type) != 0 || gc.Isfloat[nl.Type.Etype] != 0 {
+ gc.Tempname(&n1, nl.Type)
+ } else {
+ regalloc(&n1, nl.Type, res)
+ }
+ cgen(nl, &n1)
+ }
+
+ if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) != 0 || gc.Isfloat[n.Type.Etype] != 0 {
+ gc.Tempname(&n2, n.Type)
+ } else {
+ regalloc(&n2, n.Type, nil)
+ }
+ gmove(&n1, &n2)
+ gmove(&n2, res)
+ if n1.Op == gc.OREGISTER {
+ regfree(&n1)
+ }
+ if n2.Op == gc.OREGISTER {
+ regfree(&n2)
+ }
+
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OIND,
+ gc.ONAME: // PHEAP or PPARAMREF var
+ igen(n, &n1, res)
+
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // interface table is first word of interface value
+ case gc.OITAB:
+ igen(nl, &n1, res)
+
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // pointer is the first word of string or slice.
+ case gc.OSPTR:
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+ p1 = gins(arm.AMOVW, nil, &n1)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ igen(nl, &n1, res)
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ case gc.OLEN:
+ if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ // map has len in the first 32-bit word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.TINT32]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+ // both slice and string have len one pointer into the struct.
+ igen(nl, &n1, res)
+
+ n1.Type = gc.Types[gc.TUINT32]
+ n1.Xoffset += int64(gc.Array_nel)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OCAP:
+ if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ // chan has cap in the second 32-bit word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Xoffset = 4
+ n2.Type = gc.Types[gc.TINT32]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isslice(nl.Type) != 0 {
+ igen(nl, &n1, res)
+ n1.Type = gc.Types[gc.TUINT32]
+ n1.Xoffset += int64(gc.Array_cap)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OADDR:
+ agen(nl, res)
+
+ // Release res so that it is available for cgen_call.
+ // Pick it up again after the call.
+ case gc.OCALLMETH,
+ gc.OCALLFUNC:
+ rg = -1
+
+ if n.Ullman >= gc.UINF {
+ if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
+ rg = int(res.Val.U.Reg)
+ reg[rg]--
+ }
+ }
+
+ if n.Op == gc.OCALLMETH {
+ gc.Cgen_callmeth(n, 0)
+ } else {
+ cgen_call(n, 0)
+ }
+ if rg >= 0 {
+ reg[rg]++
+ }
+ cgen_callret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_callret(n, res)
+
+ case gc.OMOD,
+ gc.ODIV:
+ a = optoas(int(n.Op), nl.Type)
+ goto abop
+ }
+
+ goto ret
+
+sbop: // symmetric binary
+ if nl.Ullman < nr.Ullman {
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+ // TODO(kaib): use fewer registers here.
+abop: // asymmetric binary
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+ switch n.Op {
+ case gc.OADD,
+ gc.OSUB,
+ gc.OAND,
+ gc.OOR,
+ gc.OXOR:
+ if gc.Smallintconst(nr) != 0 {
+ n2 = *nr
+ break
+ }
+ fallthrough
+
+ default:
+ regalloc(&n2, nr.Type, nil)
+ cgen(nr, &n2)
+ }
+ } else {
+ switch n.Op {
+ case gc.OADD,
+ gc.OSUB,
+ gc.OAND,
+ gc.OOR,
+ gc.OXOR:
+ if gc.Smallintconst(nr) != 0 {
+ n2 = *nr
+ break
+ }
+ fallthrough
+
+ default:
+ regalloc(&n2, nr.Type, res)
+ cgen(nr, &n2)
+ }
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ }
+
+ gins(a, &n2, &n1)
+
+ // Normalize result for types smaller than word.
+norm:
+ if n.Type.Width < int64(gc.Widthptr) {
+ switch n.Op {
+ case gc.OADD,
+ gc.OSUB,
+ gc.OMUL,
+ gc.OCOM,
+ gc.OMINUS:
+ gins(optoas(gc.OAS, n.Type), &n1, &n1)
+ }
+ }
+
+ gmove(&n1, res)
+ regfree(&n1)
+ if n2.Op != gc.OLITERAL {
+ regfree(&n2)
+ }
+ goto ret
+
+flt: // floating-point.
+ regalloc(&f0, nl.Type, res)
+
+ if nr != nil {
+ goto flt2
+ }
+
+ if n.Op == gc.OMINUS {
+ nr = gc.Nodintconst(-1)
+ gc.Convlit(&nr, n.Type)
+ n.Op = gc.OMUL
+ goto flt2
+ }
+
+ // unary
+ cgen(nl, &f0)
+
+ if n.Op != gc.OCONV && n.Op != gc.OPLUS {
+ gins(optoas(int(n.Op), n.Type), &f0, &f0)
+ }
+ gmove(&f0, res)
+ regfree(&f0)
+ goto ret
+
+flt2: // binary
+ if nl.Ullman >= nr.Ullman {
+ cgen(nl, &f0)
+ regalloc(&f1, n.Type, nil)
+ gmove(&f0, &f1)
+ cgen(nr, &f0)
+ gins(optoas(int(n.Op), n.Type), &f0, &f1)
+ } else {
+ cgen(nr, &f0)
+ regalloc(&f1, n.Type, nil)
+ cgen(nl, &f1)
+ gins(optoas(int(n.Op), n.Type), &f0, &f1)
+ }
+
+ gmove(&f1, res)
+ regfree(&f0)
+ regfree(&f1)
+ goto ret
+
+ret:
+}
+
+/*
+ * generate array index into res.
+ * n might be any size; res is 32-bit.
+ * returns Prog* to patch to panic call.
+ */
+func cgenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
+ var tmp gc.Node
+ var lo gc.Node
+ var hi gc.Node
+ var zero gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+
+ if !(gc.Is64(n.Type) != 0) {
+ cgen(n, res)
+ return nil
+ }
+
+ gc.Tempname(&tmp, gc.Types[gc.TINT64])
+ cgen(n, &tmp)
+ split64(&tmp, &lo, &hi)
+ gmove(&lo, res)
+ if bounded != 0 {
+ splitclean()
+ return nil
+ }
+
+ regalloc(&n1, gc.Types[gc.TINT32], nil)
+ regalloc(&n2, gc.Types[gc.TINT32], nil)
+ gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
+ gmove(&hi, &n1)
+ gmove(&zero, &n2)
+ gcmp(arm.ACMP, &n1, &n2)
+ regfree(&n2)
+ regfree(&n1)
+ splitclean()
+ return gc.Gbranch(arm.ABNE, nil, -1)
+}
+
+/*
+ * generate:
+ * res = &n;
+ * The generated code checks that the result is not nil.
+ */
+func agen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var r int
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nagen-res", res)
+ gc.Dump("agen-r", n)
+ }
+
+ if n == nil || n.Type == nil || res == nil || res.Type == nil {
+ gc.Fatal("agen")
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+ // Use of a nil interface or nil slice.
+ // Create a temporary we can take the address of and read.
+ // The generated code is just going to panic, so it need not
+ // be terribly efficient. See issue 3670.
+ gc.Tempname(&n1, n.Type)
+
+ gc.Gvardef(&n1)
+ clearfat(&n1)
+ regalloc(&n2, gc.Types[gc.Tptr], res)
+ gins(arm.AMOVW, &n1, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ goto ret
+ }
+
+ if n.Addable != 0 {
+ n1 = gc.Node{}
+ n1.Op = gc.OADDR
+ n1.Left = n
+ regalloc(&n2, gc.Types[gc.Tptr], res)
+ gins(arm.AMOVW, &n1, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ goto ret
+ }
+
+ nl = n.Left
+
+ switch n.Op {
+ default:
+ gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ // Release res so that it is available for cgen_call.
+ // Pick it up again after the call.
+ case gc.OCALLMETH,
+ gc.OCALLFUNC:
+ r = -1
+
+ if n.Ullman >= gc.UINF {
+ if res.Op == gc.OREGISTER || res.Op == gc.OINDREG {
+ r = int(res.Val.U.Reg)
+ reg[r]--
+ }
+ }
+
+ if n.Op == gc.OCALLMETH {
+ gc.Cgen_callmeth(n, 0)
+ } else {
+ cgen_call(n, 0)
+ }
+ if r >= 0 {
+ reg[r]++
+ }
+ cgen_aret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_aret(n, res)
+
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ agen(&n1, res)
+
+ case gc.OEFACE:
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ agen(&n1, res)
+
+ case gc.OINDEX:
+ agenr(n, &n1, res)
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // should only get here with names in this func.
+ case gc.ONAME:
+ if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
+ gc.Dump("bad agen", n)
+ gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
+ }
+
+ // should only get here for heap vars or paramref
+ if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+ gc.Dump("bad agen", n)
+ gc.Fatal("agen: bad ONAME class %#x", n.Class)
+ }
+
+ cgen(n.Heapaddr, res)
+ if n.Xoffset != 0 {
+ gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+ regalloc(&n2, n1.Type, nil)
+ regalloc(&n3, gc.Types[gc.TINT32], nil)
+ gmove(&n1, &n2)
+ gmove(res, &n3)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ gmove(&n3, res)
+ regfree(&n2)
+ regfree(&n3)
+ }
+
+ case gc.OIND:
+ cgen(nl, res)
+ gc.Cgen_checknil(res)
+
+ case gc.ODOT:
+ agen(nl, res)
+ if n.Xoffset != 0 {
+ gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+ regalloc(&n2, n1.Type, nil)
+ regalloc(&n3, gc.Types[gc.TINT32], nil)
+ gmove(&n1, &n2)
+ gmove(res, &n3)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ gmove(&n3, res)
+ regfree(&n2)
+ regfree(&n3)
+ }
+
+ case gc.ODOTPTR:
+ cgen(nl, res)
+ gc.Cgen_checknil(res)
+ if n.Xoffset != 0 {
+ gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+ regalloc(&n2, n1.Type, nil)
+ regalloc(&n3, gc.Types[gc.Tptr], nil)
+ gmove(&n1, &n2)
+ gmove(res, &n3)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ gmove(&n3, res)
+ regfree(&n2)
+ regfree(&n3)
+ }
+ }
+
+ret:
+}
+
+/*
+ * generate:
+ * newreg = &n;
+ * res = newreg
+ *
+ * on exit, a has been changed to be *newreg.
+ * caller must regfree(a).
+ * The generated code checks that the result is not *nil.
+ */
+func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+ var r int
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nigen-n", n)
+ }
+
+ switch n.Op {
+ case gc.ONAME:
+ if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
+ break
+ }
+ *a = *n
+ return
+
+ // Increase the refcount of the register so that igen's caller
+ // has to call regfree.
+ case gc.OINDREG:
+ if n.Val.U.Reg != arm.REGSP {
+ reg[n.Val.U.Reg]++
+ }
+ *a = *n
+ return
+
+ case gc.ODOT:
+ igen(n.Left, a, res)
+ a.Xoffset += n.Xoffset
+ a.Type = n.Type
+ return
+
+ case gc.ODOTPTR:
+ if n.Left.Addable != 0 || n.Left.Op == gc.OCALLFUNC || n.Left.Op == gc.OCALLMETH || n.Left.Op == gc.OCALLINTER {
+ // igen-able nodes.
+ igen(n.Left, &n1, res)
+
+ regalloc(a, gc.Types[gc.Tptr], &n1)
+ gmove(&n1, a)
+ regfree(&n1)
+ } else {
+ regalloc(a, gc.Types[gc.Tptr], res)
+ cgen(n.Left, a)
+ }
+
+ gc.Cgen_checknil(a)
+ a.Op = gc.OINDREG
+ a.Xoffset = n.Xoffset
+ a.Type = n.Type
+ return
+
+ // Release res so that it is available for cgen_call.
+ // Pick it up again after the call.
+ case gc.OCALLMETH,
+ gc.OCALLFUNC,
+ gc.OCALLINTER:
+ r = -1
+
+ if n.Ullman >= gc.UINF {
+ if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
+ r = int(res.Val.U.Reg)
+ reg[r]--
+ }
+ }
+
+ switch n.Op {
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, nil, 0)
+ }
+
+ if r >= 0 {
+ reg[r]++
+ }
+ regalloc(a, gc.Types[gc.Tptr], res)
+ cgen_aret(n, a)
+ a.Op = gc.OINDREG
+ a.Type = n.Type
+ return
+ }
+
+ agenr(n, a, res)
+ a.Op = gc.OINDREG
+ a.Type = n.Type
+}
+
+/*
+ * allocate a register in res and generate
+ * newreg = &n
+ * The caller must call regfree(a).
+ */
+func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("cgenr-n", n)
+ }
+
+ if gc.Isfat(n.Type) != 0 {
+ gc.Fatal("cgenr on fat node")
+ }
+
+ if n.Addable != 0 {
+ regalloc(a, gc.Types[gc.Tptr], res)
+ gmove(n, a)
+ return
+ }
+
+ switch n.Op {
+ case gc.ONAME,
+ gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ igen(n, &n1, res)
+ regalloc(a, gc.Types[gc.Tptr], &n1)
+ gmove(&n1, a)
+ regfree(&n1)
+
+ default:
+ regalloc(a, n.Type, res)
+ cgen(n, a)
+ }
+}
+
+/*
+ * generate:
+ * newreg = &n;
+ *
+ * caller must regfree(a).
+ * The generated code checks that the result is not nil.
+ */
+func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var n4 gc.Node
+ var tmp gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var w uint32
+ var v uint64
+ var bounded int
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("agenr-n", n)
+ }
+
+ nl = n.Left
+ nr = n.Right
+
+ switch n.Op {
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ igen(n, &n1, res)
+ regalloc(a, gc.Types[gc.Tptr], &n1)
+ agen(&n1, a)
+ regfree(&n1)
+
+ case gc.OIND:
+ cgenr(n.Left, a, res)
+ gc.Cgen_checknil(a)
+
+ case gc.OINDEX:
+ p2 = nil // to be patched to panicindex.
+ w = uint32(n.Type.Width)
+ bounded = bool2int(gc.Debug['B'] != 0 || n.Bounded != 0)
+ if nr.Addable != 0 {
+ if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ gc.Tempname(&tmp, gc.Types[gc.TINT32])
+ }
+ if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ agenr(nl, &n3, res)
+ }
+ if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ p2 = cgenindex(nr, &tmp, bounded)
+ regalloc(&n1, tmp.Type, nil)
+ gmove(&tmp, &n1)
+ }
+ } else if nl.Addable != 0 {
+ if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ gc.Tempname(&tmp, gc.Types[gc.TINT32])
+ p2 = cgenindex(nr, &tmp, bounded)
+ regalloc(&n1, tmp.Type, nil)
+ gmove(&tmp, &n1)
+ }
+
+ if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ agenr(nl, &n3, res)
+ }
+ } else {
+ gc.Tempname(&tmp, gc.Types[gc.TINT32])
+ p2 = cgenindex(nr, &tmp, bounded)
+ nr = &tmp
+ if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ agenr(nl, &n3, res)
+ }
+ regalloc(&n1, tmp.Type, nil)
+ gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
+ }
+
+ // &a is in &n3 (allocated in res)
+ // i is in &n1 (if not constant)
+ // w is width
+
+ // constant index
+ if gc.Isconst(nr, gc.CTINT) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ gc.Fatal("constant string constant index")
+ }
+ v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_nel)
+ regalloc(&n4, n1.Type, nil)
+ gmove(&n1, &n4)
+ gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
+ gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n4, &n2)
+ regfree(&n4)
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
+ ginscall(gc.Panicindex, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_array)
+ gmove(&n1, &n3)
+ }
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], int64(v*uint64(w)))
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ *a = n3
+ break
+ }
+
+ regalloc(&n2, gc.Types[gc.TINT32], &n1) // i
+ gmove(&n1, &n2)
+ regfree(&n1)
+
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ // check bounds
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ gc.Nodconst(&n4, gc.Types[gc.TUINT32], int64(len(nl.Val.U.Sval.S)))
+ } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_nel)
+ regalloc(&n4, gc.Types[gc.TUINT32], nil)
+ gmove(&n1, &n4)
+ } else {
+ gc.Nodconst(&n4, gc.Types[gc.TUINT32], nl.Type.Bound)
+ }
+
+ gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n2, &n4)
+ if n4.Op == gc.OREGISTER {
+ regfree(&n4)
+ }
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ if p2 != nil {
+ gc.Patch(p2, gc.Pc)
+ }
+ ginscall(gc.Panicindex, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ regalloc(&n3, gc.Types[gc.Tptr], res)
+ p1 = gins(arm.AMOVW, nil, &n3)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ p1.From.Type = obj.TYPE_ADDR
+ } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_array)
+ gmove(&n1, &n3)
+ }
+
+ if w == 0 {
+ // nothing to do
+ } else if w == 1 || w == 2 || w == 4 || w == 8 {
+ n4 = gc.Node{}
+ n4.Op = gc.OADDR
+ n4.Left = &n2
+ cgen(&n4, &n3)
+ if w == 1 {
+ gins(arm.AADD, &n2, &n3)
+ } else if w == 2 {
+ gshift(arm.AADD, &n2, arm.SHIFT_LL, 1, &n3)
+ } else if w == 4 {
+ gshift(arm.AADD, &n2, arm.SHIFT_LL, 2, &n3)
+ } else if w == 8 {
+ gshift(arm.AADD, &n2, arm.SHIFT_LL, 3, &n3)
+ }
+ } else {
+ regalloc(&n4, gc.Types[gc.TUINT32], nil)
+ gc.Nodconst(&n1, gc.Types[gc.TUINT32], int64(w))
+ gmove(&n1, &n4)
+ gins(optoas(gc.OMUL, gc.Types[gc.TUINT32]), &n4, &n2)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ regfree(&n4)
+ }
+
+ *a = n3
+ regfree(&n2)
+
+ default:
+ regalloc(a, gc.Types[gc.Tptr], res)
+ agen(n, a)
+ }
+}
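+
+// A plain-Go sketch of the address the OINDEX case above computes
+// (illustrative only; elemAddr is a hypothetical helper, not part of
+// this conversion): after the optional bounds check against the
+// length word at Array_nel, the element address is base + i*w, with
+// shifted adds used for the common widths 1, 2, 4 and 8.
+//
+// func elemAddr(base, i, n, w uintptr) uintptr {
+// if i >= n {
+// panic("index out of range") // ginscall(gc.Panicindex, 0) above
+// }
+// return base + i*w
+// }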
+
+func gencmp0(n *gc.Node, t *gc.Type, o int, likely int, to *obj.Prog) {
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var a int
+
+ regalloc(&n1, t, nil)
+ cgen(n, &n1)
+ a = optoas(gc.OCMP, t)
+ if a != arm.ACMP {
+ gc.Nodconst(&n2, t, 0)
+ regalloc(&n3, t, nil)
+ gmove(&n2, &n3)
+ gcmp(a, &n1, &n3)
+ regfree(&n3)
+ } else {
+ gins(arm.ATST, &n1, nil)
+ }
+ a = optoas(o, t)
+ gc.Patch(gc.Gbranch(a, t, likely), to)
+ regfree(&n1)
+}
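+
+// gencmp0 above compares n against zero and branches: when
+// optoas(OCMP, t) yields the plain integer ACMP, a TST instruction
+// sets the condition flags without materializing a zero constant;
+// otherwise (the floating-point types) a zero is loaded into a
+// register and compared with gcmp before the conditional branch.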
+
+/*
+ * generate:
+ * if(n == true) goto to;
+ */
+func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
+ var et int
+ var a int
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var tmp gc.Node
+ var ll *gc.NodeList
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nbgen", n)
+ }
+
+ if n == nil {
+ n = gc.Nodbool(1)
+ }
+
+ if n.Ninit != nil {
+ gc.Genlist(n.Ninit)
+ }
+
+ if n.Type == nil {
+ gc.Convlit(&n, gc.Types[gc.TBOOL])
+ if n.Type == nil {
+ goto ret
+ }
+ }
+
+ et = int(n.Type.Etype)
+ if et != gc.TBOOL {
+ gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
+ gc.Patch(gins(obj.AEND, nil, nil), to)
+ goto ret
+ }
+
+ nr = nil
+
+ switch n.Op {
+ default:
+ a = gc.ONE
+ if !true_ {
+ a = gc.OEQ
+ }
+ gencmp0(n, n.Type, a, likely, to)
+ goto ret
+
+ // need to ask if it is bool?
+ case gc.OLITERAL:
+ if !true_ == !(n.Val.U.Bval != 0) {
+ gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
+ }
+ goto ret
+
+ case gc.OANDAND,
+ gc.OOROR:
+ if (n.Op == gc.OANDAND) == true_ {
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n.Left, !true_, -likely, p2)
+ bgen(n.Right, !true_, -likely, p2)
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, to)
+ gc.Patch(p2, gc.Pc)
+ } else {
+ bgen(n.Left, true_, likely, to)
+ bgen(n.Right, true_, likely, to)
+ }
+
+ goto ret
+
+ case gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OGT,
+ gc.OLE,
+ gc.OGE:
+ nr = n.Right
+ if nr == nil || nr.Type == nil {
+ goto ret
+ }
+ fallthrough
+
+ case gc.ONOT: // unary
+ nl = n.Left
+
+ if nl == nil || nl.Type == nil {
+ goto ret
+ }
+ }
+
+ switch n.Op {
+ case gc.ONOT:
+ bgen(nl, !true_, likely, to)
+ goto ret
+
+ case gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OGT,
+ gc.OLE,
+ gc.OGE:
+ a = int(n.Op)
+ if !true_ {
+ if gc.Isfloat[nl.Type.Etype] != 0 {
+ // brcom is not valid on floats when NaN is involved.
+ p1 = gc.Gbranch(arm.AB, nil, 0)
+
+ p2 = gc.Gbranch(arm.AB, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ ll = n.Ninit
+ n.Ninit = nil
+ bgen(n, true, -likely, p2)
+ n.Ninit = ll
+ gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
+ gc.Patch(p2, gc.Pc)
+ goto ret
+ }
+
+ a = gc.Brcom(a)
+ true_ = !true_
+ }
+
+ // make simplest on right
+ if nl.Op == gc.OLITERAL || (nl.Ullman < gc.UINF && nl.Ullman < nr.Ullman) {
+ a = gc.Brrev(a)
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+ if gc.Isslice(nl.Type) != 0 {
+ // only valid to compare a slice (darray) to literal nil
+ if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+ gc.Yyerror("illegal array comparison")
+ break
+ }
+
+ igen(nl, &n1, nil)
+ n1.Xoffset += int64(gc.Array_array)
+ n1.Type = gc.Types[gc.Tptr]
+ gencmp0(&n1, gc.Types[gc.Tptr], a, likely, to)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isinter(nl.Type) != 0 {
+ // front end should only leave cmp to literal nil
+ if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+ gc.Yyerror("illegal interface comparison")
+ break
+ }
+
+ igen(nl, &n1, nil)
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset += 0
+ gencmp0(&n1, gc.Types[gc.Tptr], a, likely, to)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Iscomplex[nl.Type.Etype] != 0 {
+ gc.Complexbool(a, nl, nr, true_, likely, to)
+ break
+ }
+
+ if gc.Is64(nr.Type) != 0 {
+ if !(nl.Addable != 0) {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ nl = &n1
+ }
+
+ if !(nr.Addable != 0) {
+ gc.Tempname(&n2, nr.Type)
+ cgen(nr, &n2)
+ nr = &n2
+ }
+
+ cmp64(nl, nr, a, likely, to)
+ break
+ }
+
+ if nr.Op == gc.OLITERAL {
+ if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) == 0 {
+ gencmp0(nl, nl.Type, a, likely, to)
+ break
+ }
+
+ if nr.Val.Ctype == gc.CTNIL {
+ gencmp0(nl, nl.Type, a, likely, to)
+ break
+ }
+ }
+
+ a = optoas(a, nr.Type)
+
+ if nr.Ullman >= gc.UINF {
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+
+ gc.Tempname(&tmp, nl.Type)
+ gmove(&n1, &tmp)
+ regfree(&n1)
+
+ regalloc(&n2, nr.Type, nil)
+ cgen(nr, &n2)
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(&tmp, &n1)
+
+ gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2)
+ gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
+
+ regfree(&n1)
+ regfree(&n2)
+ break
+ }
+
+ gc.Tempname(&n3, nl.Type)
+ cgen(nl, &n3)
+
+ gc.Tempname(&tmp, nr.Type)
+ cgen(nr, &tmp)
+
+ regalloc(&n1, nl.Type, nil)
+ gmove(&n3, &n1)
+
+ regalloc(&n2, nr.Type, nil)
+ gmove(&tmp, &n2)
+
+ gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2)
+ if gc.Isfloat[nl.Type.Etype] != 0 {
+ if n.Op == gc.ONE {
+ p1 = gc.Gbranch(arm.ABVS, nr.Type, likely)
+ gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
+ gc.Patch(p1, to)
+ } else {
+ p1 = gc.Gbranch(arm.ABVS, nr.Type, -likely)
+ gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
+ gc.Patch(p1, gc.Pc)
+ }
+ } else {
+ gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
+ }
+
+ regfree(&n1)
+ regfree(&n2)
+ }
+
+ goto ret
+
+ret:
+}
+
+/*
+ * n is on the stack, either a local variable
+ * or a return value from a function call.
+ * return n's offset from SP. The sentinel 1000 means
+ * "on the stack at a statically unknown offset"; -1000 means
+ * "not known to be on the stack at all" (see the botch note
+ * below and the overlap test in sgen).
+ */
+func stkof(n *gc.Node) int32 {
+ var t *gc.Type
+ var flist gc.Iter
+ var off int32
+
+ switch n.Op {
+ case gc.OINDREG:
+ return int32(n.Xoffset)
+
+ case gc.ODOT:
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ break
+ }
+ off = stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ return int32(int64(off) + n.Xoffset)
+
+ case gc.OINDEX:
+ t = n.Left.Type
+ if !(gc.Isfixedarray(t) != 0) {
+ break
+ }
+ off = stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
+ }
+ return 1000
+
+ case gc.OCALLMETH,
+ gc.OCALLINTER,
+ gc.OCALLFUNC:
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+
+ t = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if t != nil {
+ return int32(t.Width + 4) // correct for LR
+ }
+ }
+
+ // botch - probably failing to recognize address
+ // arithmetic on the above. eg INDEX and DOT
+ return -1000
+}
+
+/*
+ * block copy:
+ * memmove(&res, &n, w);
+ * NB: character copy assumes a little-endian architecture
+ */
+func sgen(n *gc.Node, res *gc.Node, w int64) {
+ var dst gc.Node
+ var src gc.Node
+ var tmp gc.Node
+ var nend gc.Node
+ var r0 gc.Node
+ var r1 gc.Node
+ var r2 gc.Node
+ var f *gc.Node
+ var c int32
+ var odst int32
+ var osrc int32
+ var dir int
+ var align int
+ var op int
+ var p *obj.Prog
+ var ploop *obj.Prog
+ var l *gc.NodeList
+
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("\nsgen w=%d\n", w)
+ gc.Dump("r", n)
+ gc.Dump("res", res)
+ }
+
+ if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
+ gc.Fatal("sgen UINF")
+ }
+
+ if w < 0 || int64(int32(w)) != w {
+ gc.Fatal("sgen copy %d", w)
+ }
+
+ if n.Type == nil {
+ gc.Fatal("sgen: missing type")
+ }
+
+ if w == 0 {
+ // evaluate side effects only.
+ regalloc(&dst, gc.Types[gc.Tptr], nil)
+
+ agen(res, &dst)
+ agen(n, &dst)
+ regfree(&dst)
+ return
+ }
+
+ // If copying .args, that's all the results, so record definition sites
+ // for them for the liveness analysis.
+ if res.Op == gc.ONAME && res.Sym.Name == ".args" {
+ for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ if l.N.Class == gc.PPARAMOUT {
+ gc.Gvardef(l.N)
+ }
+ }
+ }
+
+ // Avoid taking the address for simple enough types.
+ if componentgen(n, res) != 0 {
+ return
+ }
+
+ // determine alignment.
+ // want to avoid unaligned access, so have to use
+ // smaller operations for less aligned types.
+ // for example moving [4]byte must use 4 MOVB not 1 MOVW.
+ align = int(n.Type.Align)
+
+ switch align {
+ default:
+ gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
+ fallthrough
+
+ case 1:
+ op = arm.AMOVB
+
+ case 2:
+ op = arm.AMOVH
+
+ case 4:
+ op = arm.AMOVW
+ }
+
+ if w%int64(align) != 0 {
+ gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
+ }
+ c = int32(w / int64(align))
+
+ // offset on the stack
+ osrc = stkof(n)
+
+ odst = stkof(res)
+ if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
+ // osrc and odst both on stack, and at least one is in
+ // an unknown position. Could generate code to test
+ // for forward/backward copy, but instead just copy
+ // to a temporary location first.
+ gc.Tempname(&tmp, n.Type)
+
+ sgen(n, &tmp, w)
+ sgen(&tmp, res, w)
+ return
+ }
+
+ if osrc%int32(align) != 0 || odst%int32(align) != 0 {
+ gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
+ }
+
+ // if we are copying forward on the stack and
+ // the src and dst overlap, then reverse direction
+ dir = align
+
+ if osrc < odst && int64(odst) < int64(osrc)+w {
+ dir = -dir
+ }
+
+ if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
+ r0.Op = gc.OREGISTER
+ r0.Val.U.Reg = REGALLOC_R0
+ r1.Op = gc.OREGISTER
+ r1.Val.U.Reg = REGALLOC_R0 + 1
+ r2.Op = gc.OREGISTER
+ r2.Val.U.Reg = REGALLOC_R0 + 2
+
+ regalloc(&src, gc.Types[gc.Tptr], &r1)
+ regalloc(&dst, gc.Types[gc.Tptr], &r2)
+ if n.Ullman >= res.Ullman {
+ // eval n first
+ agen(n, &src)
+
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ agen(res, &dst)
+ } else {
+ // eval res first
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ agen(res, &dst)
+ agen(n, &src)
+ }
+
+ regalloc(&tmp, gc.Types[gc.Tptr], &r0)
+ f = gc.Sysfunc("duffcopy")
+ p = gins(obj.ADUFFCOPY, nil, f)
+ gc.Afunclit(&p.To, f)
+
+ // 8 and 128 = magic constants: see ../../runtime/asm_arm.s
+ p.To.Offset = 8 * (128 - int64(c))
+
+ regfree(&tmp)
+ regfree(&src)
+ regfree(&dst)
+ return
+ }
+
+ if n.Ullman >= res.Ullman {
+ agenr(n, &dst, res) // temporarily use dst
+ regalloc(&src, gc.Types[gc.Tptr], nil)
+ gins(arm.AMOVW, &dst, &src)
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ agen(res, &dst)
+ } else {
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ agenr(res, &dst, res)
+ agenr(n, &src, nil)
+ }
+
+ regalloc(&tmp, gc.Types[gc.TUINT32], nil)
+
+ // set up end marker
+ nend = gc.Node{}
+
+ if c >= 4 {
+ regalloc(&nend, gc.Types[gc.TUINT32], nil)
+
+ p = gins(arm.AMOVW, &src, &nend)
+ p.From.Type = obj.TYPE_ADDR
+ if dir < 0 {
+ p.From.Offset = int64(dir)
+ } else {
+ p.From.Offset = w
+ }
+ }
+
+ // move src and dest to the end of block if necessary
+ if dir < 0 {
+ p = gins(arm.AMOVW, &src, &src)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = w + int64(dir)
+
+ p = gins(arm.AMOVW, &dst, &dst)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = w + int64(dir)
+ }
+
+ // move
+ if c >= 4 {
+ p = gins(op, &src, &tmp)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = int64(dir)
+ p.Scond |= arm.C_PBIT
+ ploop = p
+
+ p = gins(op, &tmp, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(dir)
+ p.Scond |= arm.C_PBIT
+
+ p = gins(arm.ACMP, &src, nil)
+ raddr(&nend, p)
+
+ gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
+ regfree(&nend)
+ } else {
+ for ; c > 0; c-- {
+ p = gins(op, &src, &tmp)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = int64(dir)
+ p.Scond |= arm.C_PBIT
+
+ p = gins(op, &tmp, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(dir)
+ p.Scond |= arm.C_PBIT
+ }
+ }
+
+ regfree(&dst)
+ regfree(&src)
+ regfree(&tmp)
+}
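+
+// The direction choice in sgen, as a plain-Go sketch (illustrative
+// only; copyDir is a hypothetical helper): a forward copy would
+// clobber not-yet-copied source bytes when the source lies below an
+// overlapping destination, so the copy runs backward in that case.
+//
+// func copyDir(osrc, odst, w int64) int64 {
+// if osrc < odst && odst < osrc+w {
+// return -1 // overlap: walk from the end, backward
+// }
+// return +1 // safe to copy forward
+// }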
+
+func cadable(n *gc.Node) int {
+ if !(n.Addable != 0) {
+ // don't know how it happens,
+ // but it does
+ return 0
+ }
+
+ switch n.Op {
+ case gc.ONAME:
+ return 1
+ }
+
+ return 0
+}
+
+/*
+ * copy a composite value by moving its individual components.
+ * Slices, strings and interfaces are supported.
+ * Small structs or arrays with elements of basic type are
+ * also supported.
+ * nr is nil when assigning a zero value.
+ * return 1 if the copy can be done, 0 if it cannot.
+ */
+func componentgen(nr *gc.Node, nl *gc.Node) int {
+ var nodl gc.Node
+ var nodr gc.Node
+ var tmp gc.Node
+ var t *gc.Type
+ var freel int
+ var freer int
+ var fldcount int64
+ var loffset int64
+ var roffset int64
+
+ freel = 0
+ freer = 0
+
+ switch nl.Type.Etype {
+ default:
+ goto no
+
+ case gc.TARRAY:
+ t = nl.Type
+
+ // Slices are ok.
+ if gc.Isslice(t) != 0 {
+ break
+ }
+
+ // Small arrays are ok.
+ if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+ break
+ }
+
+ goto no
+
+ // Small structs with non-fat types are ok.
+ // Zero-sized structs are treated separately elsewhere.
+ case gc.TSTRUCT:
+ fldcount = 0
+
+ for t = nl.Type.Type; t != nil; t = t.Down {
+ if gc.Isfat(t.Type) != 0 {
+ goto no
+ }
+ if t.Etype != gc.TFIELD {
+ gc.Fatal("componentgen: not a TFIELD: %v", gc.Tconv(t, obj.FmtLong))
+ }
+ fldcount++
+ }
+
+ if fldcount == 0 || fldcount > 4 {
+ goto no
+ }
+
+ case gc.TSTRING,
+ gc.TINTER:
+ break
+ }
+
+ nodl = *nl
+ if !(cadable(nl) != 0) {
+ if nr != nil && !(cadable(nr) != 0) {
+ goto no
+ }
+ igen(nl, &nodl, nil)
+ freel = 1
+ }
+
+ if nr != nil {
+ nodr = *nr
+ if !(cadable(nr) != 0) {
+ igen(nr, &nodr, nil)
+ freer = 1
+ }
+ } else {
+ // When zeroing, prepare a register containing zero.
+ gc.Nodconst(&tmp, nl.Type, 0)
+
+ regalloc(&nodr, gc.Types[gc.TUINT], nil)
+ gmove(&tmp, &nodr)
+ freer = 1
+ }
+
+ // nl and nr are 'cadable' which basically means they are names (variables) now.
+ // If they are the same variable, don't generate any code, because the
+ // VARDEF we generate will mark the old value as dead incorrectly.
+ // (And also the assignments are useless.)
+ if nr != nil && nl.Op == gc.ONAME && nr.Op == gc.ONAME && nl == nr {
+ goto yes
+ }
+
+ switch nl.Type.Etype {
+ // componentgen for arrays.
+ case gc.TARRAY:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ t = nl.Type
+ if !(gc.Isslice(t) != 0) {
+ nodl.Type = t.Type
+ nodr.Type = nodl.Type
+ for fldcount = 0; fldcount < t.Bound; fldcount++ {
+ if nr == nil {
+ gc.Clearslim(&nodl)
+ } else {
+ gmove(&nodr, &nodl)
+ }
+ nodl.Xoffset += t.Type.Width
+ nodr.Xoffset += t.Type.Width
+ }
+
+ goto yes
+ }
+
+ // componentgen for slices.
+ nodl.Xoffset += int64(gc.Array_array)
+
+ nodl.Type = gc.Ptrto(nl.Type.Type)
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TSTRING:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ nodl.Xoffset += int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TINTER:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ nodl.Xoffset += int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TSTRUCT:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ loffset = nodl.Xoffset
+ roffset = nodr.Xoffset
+
+ // funarg structs may not begin at offset zero.
+ if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
+ loffset -= nl.Type.Type.Width
+ }
+ if nr != nil && nr.Type.Etype == gc.TSTRUCT && nr.Type.Funarg != 0 && nr.Type.Type != nil {
+ roffset -= nr.Type.Type.Width
+ }
+
+ for t = nl.Type.Type; t != nil; t = t.Down {
+ nodl.Xoffset = loffset + t.Width
+ nodl.Type = t.Type
+
+ if nr == nil {
+ gc.Clearslim(&nodl)
+ } else {
+ nodr.Xoffset = roffset + t.Width
+ nodr.Type = nodl.Type
+ gmove(&nodr, &nodl)
+ }
+ }
+
+ goto yes
+ }
+
+no:
+ if freer != 0 {
+ regfree(&nodr)
+ }
+ if freel != 0 {
+ regfree(&nodl)
+ }
+ return 0
+
+yes:
+ if freer != 0 {
+ regfree(&nodr)
+ }
+ if freel != 0 {
+ regfree(&nodl)
+ }
+ return 1
+}
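+
+// The slice case of componentgen, schematically: instead of a bulk
+// copy, the assignment is decomposed into one word move per
+// component, in Array_array, Array_nel, Array_cap order:
+//
+// dst.array = src.array
+// dst.len = src.len
+// dst.cap = src.cap
+//
+// The string and interface cases are the analogous two-word moves.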
diff --git a/src/cmd/new5g/cgen64.go b/src/cmd/new5g/cgen64.go
new file mode 100644
index 0000000000..e5fefe3b5e
--- /dev/null
+++ b/src/cmd/new5g/cgen64.go
@@ -0,0 +1,836 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+
+/*
+ * generate 64-bit
+ * res = n
+ * unhandled ops are fatal errors; there is no failure return.
+ */
+func cgen64(n *gc.Node, res *gc.Node) {
+ var t1 gc.Node
+ var t2 gc.Node
+ var l *gc.Node
+ var r *gc.Node
+ var lo1 gc.Node
+ var lo2 gc.Node
+ var hi1 gc.Node
+ var hi2 gc.Node
+ var al gc.Node
+ var ah gc.Node
+ var bl gc.Node
+ var bh gc.Node
+ var cl gc.Node
+ var ch gc.Node
+ var s gc.Node
+ var n1 gc.Node
+ var creg gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var p4 *obj.Prog
+ var p5 *obj.Prog
+ var p6 *obj.Prog
+ var v uint64
+
+ if res.Op != gc.OINDREG && res.Op != gc.ONAME {
+ gc.Dump("n", n)
+ gc.Dump("res", res)
+ gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
+ }
+
+ l = n.Left
+ if !(l.Addable != 0) {
+ gc.Tempname(&t1, l.Type)
+ cgen(l, &t1)
+ l = &t1
+ }
+
+ split64(l, &lo1, &hi1)
+ switch n.Op {
+ default:
+ gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
+ fallthrough
+
+ case gc.OMINUS:
+ split64(res, &lo2, &hi2)
+
+ regalloc(&t1, lo1.Type, nil)
+ regalloc(&al, lo1.Type, nil)
+ regalloc(&ah, hi1.Type, nil)
+
+ gins(arm.AMOVW, &lo1, &al)
+ gins(arm.AMOVW, &hi1, &ah)
+
+ gmove(ncon(0), &t1)
+ p1 = gins(arm.ASUB, &al, &t1)
+ p1.Scond |= arm.C_SBIT
+ gins(arm.AMOVW, &t1, &lo2)
+
+ gmove(ncon(0), &t1)
+ gins(arm.ASBC, &ah, &t1)
+ gins(arm.AMOVW, &t1, &hi2)
+
+ regfree(&t1)
+ regfree(&al)
+ regfree(&ah)
+ splitclean()
+ splitclean()
+ return
+
+ case gc.OCOM:
+ regalloc(&t1, lo1.Type, nil)
+ gmove(ncon(^uint32(0)), &t1)
+
+ split64(res, &lo2, &hi2)
+ regalloc(&n1, lo1.Type, nil)
+
+ gins(arm.AMOVW, &lo1, &n1)
+ gins(arm.AEOR, &t1, &n1)
+ gins(arm.AMOVW, &n1, &lo2)
+
+ gins(arm.AMOVW, &hi1, &n1)
+ gins(arm.AEOR, &t1, &n1)
+ gins(arm.AMOVW, &n1, &hi2)
+
+ regfree(&t1)
+ regfree(&n1)
+ splitclean()
+ splitclean()
+ return
+
+ // binary operators.
+ // common setup below.
+ case gc.OADD,
+ gc.OSUB,
+ gc.OMUL,
+ gc.OLSH,
+ gc.ORSH,
+ gc.OAND,
+ gc.OOR,
+ gc.OXOR,
+ gc.OLROT:
+ break
+ }
+
+ // setup for binary operators
+ r = n.Right
+
+ if r != nil && !(r.Addable != 0) {
+ gc.Tempname(&t2, r.Type)
+ cgen(r, &t2)
+ r = &t2
+ }
+
+ if gc.Is64(r.Type) != 0 {
+ split64(r, &lo2, &hi2)
+ }
+
+ regalloc(&al, lo1.Type, nil)
+ regalloc(&ah, hi1.Type, nil)
+
+ // Do op. Leave result in ah:al.
+ switch n.Op {
+ default:
+ gc.Fatal("cgen64: not implemented: %v\n", gc.Nconv(n, 0))
+ fallthrough
+
+ // TODO: Constants
+ case gc.OADD:
+ regalloc(&bl, gc.Types[gc.TPTR32], nil)
+
+ regalloc(&bh, gc.Types[gc.TPTR32], nil)
+ gins(arm.AMOVW, &hi1, &ah)
+ gins(arm.AMOVW, &lo1, &al)
+ gins(arm.AMOVW, &hi2, &bh)
+ gins(arm.AMOVW, &lo2, &bl)
+ p1 = gins(arm.AADD, &bl, &al)
+ p1.Scond |= arm.C_SBIT
+ gins(arm.AADC, &bh, &ah)
+ regfree(&bl)
+ regfree(&bh)
+
+ // TODO: Constants.
+ case gc.OSUB:
+ regalloc(&bl, gc.Types[gc.TPTR32], nil)
+
+ regalloc(&bh, gc.Types[gc.TPTR32], nil)
+ gins(arm.AMOVW, &lo1, &al)
+ gins(arm.AMOVW, &hi1, &ah)
+ gins(arm.AMOVW, &lo2, &bl)
+ gins(arm.AMOVW, &hi2, &bh)
+ p1 = gins(arm.ASUB, &bl, &al)
+ p1.Scond |= arm.C_SBIT
+ gins(arm.ASBC, &bh, &ah)
+ regfree(&bl)
+ regfree(&bh)
+
+ // TODO(kaib): this can be done with 4 regs and does not need 6
+ case gc.OMUL:
+ regalloc(&bl, gc.Types[gc.TPTR32], nil)
+
+ regalloc(&bh, gc.Types[gc.TPTR32], nil)
+ regalloc(&cl, gc.Types[gc.TPTR32], nil)
+ regalloc(&ch, gc.Types[gc.TPTR32], nil)
+
+ // load args into bh:bl and ch:cl.
+ gins(arm.AMOVW, &hi1, &bh)
+
+ gins(arm.AMOVW, &lo1, &bl)
+ gins(arm.AMOVW, &hi2, &ch)
+ gins(arm.AMOVW, &lo2, &cl)
+
+ // bl * cl -> ah al
+ p1 = gins(arm.AMULLU, nil, nil)
+
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = bl.Val.U.Reg
+ p1.Reg = cl.Val.U.Reg
+ p1.To.Type = obj.TYPE_REGREG
+ p1.To.Reg = ah.Val.U.Reg
+ p1.To.Offset = int64(al.Val.U.Reg)
+
+ //print("%P\n", p1);
+
+ // bl * ch + ah -> ah
+ p1 = gins(arm.AMULA, nil, nil)
+
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = bl.Val.U.Reg
+ p1.Reg = ch.Val.U.Reg
+ p1.To.Type = obj.TYPE_REGREG2
+ p1.To.Reg = ah.Val.U.Reg
+ p1.To.Offset = int64(ah.Val.U.Reg)
+
+ //print("%P\n", p1);
+
+ // bh * cl + ah -> ah
+ p1 = gins(arm.AMULA, nil, nil)
+
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = bh.Val.U.Reg
+ p1.Reg = cl.Val.U.Reg
+ p1.To.Type = obj.TYPE_REGREG2
+ p1.To.Reg = ah.Val.U.Reg
+ p1.To.Offset = int64(ah.Val.U.Reg)
+
+ //print("%P\n", p1);
+
+ regfree(&bh)
+
+ regfree(&bl)
+ regfree(&ch)
+ regfree(&cl)
+
+ // We only rotate by a constant c in [0,64).
+ // if c >= 32:
+ // lo, hi = hi, lo
+ // c -= 32
+ // if c == 0:
+ // no-op
+ // else:
+ // rotate the pair with paired shifts and ORs
+ // (the MOVW/ORR sequence below).
+ case gc.OLROT:
+ v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+
+ regalloc(&bl, lo1.Type, nil)
+ regalloc(&bh, hi1.Type, nil)
+ if v >= 32 {
+ // reverse during load to do the first 32 bits of rotate
+ v -= 32
+
+ gins(arm.AMOVW, &hi1, &bl)
+ gins(arm.AMOVW, &lo1, &bh)
+ } else {
+ gins(arm.AMOVW, &hi1, &bh)
+ gins(arm.AMOVW, &lo1, &bl)
+ }
+
+ if v == 0 {
+ gins(arm.AMOVW, &bh, &ah)
+ gins(arm.AMOVW, &bl, &al)
+ } else {
+ // rotate by 1 <= v <= 31
+ // MOVW bl<<v, al
+ // MOVW bh<<v, ah
+ // OR bl>>(32-v), ah
+ // OR bh>>(32-v), al
+ gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)
+
+ gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
+ gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
+ gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al)
+ }
+
+ regfree(&bl)
+ regfree(&bh)
+
+ case gc.OLSH:
+ regalloc(&bl, lo1.Type, nil)
+ regalloc(&bh, hi1.Type, nil)
+ gins(arm.AMOVW, &hi1, &bh)
+ gins(arm.AMOVW, &lo1, &bl)
+
+ if r.Op == gc.OLITERAL {
+ v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ if v >= 64 {
+ // TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
+ // here and below (verify it optimizes to EOR)
+ gins(arm.AEOR, &al, &al)
+
+ gins(arm.AEOR, &ah, &ah)
+ } else if v > 32 {
+ gins(arm.AEOR, &al, &al)
+
+ // MOVW bl<<(v-32), ah
+ gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v-32), &ah)
+ } else if v == 32 {
+ gins(arm.AEOR, &al, &al)
+ gins(arm.AMOVW, &bl, &ah)
+ } else if v > 0 {
+ // MOVW bl<<v, al
+ gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)
+
+ // MOVW bh<<v, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
+
+ // OR bl>>(32-v), ah
+ gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
+ } else {
+ gins(arm.AMOVW, &bl, &al)
+ gins(arm.AMOVW, &bh, &ah)
+ }
+
+ goto olsh_break
+ }
+
+ regalloc(&s, gc.Types[gc.TUINT32], nil)
+ regalloc(&creg, gc.Types[gc.TUINT32], nil)
+ if gc.Is64(r.Type) != 0 {
+ // shift is >= 1<<32
+ split64(r, &cl, &ch)
+
+ gmove(&ch, &s)
+ gins(arm.ATST, &s, nil)
+ p6 = gc.Gbranch(arm.ABNE, nil, 0)
+ gmove(&cl, &s)
+ splitclean()
+ } else {
+ gmove(r, &s)
+ p6 = nil
+ }
+
+ gins(arm.ATST, &s, nil)
+
+ // shift == 0
+ p1 = gins(arm.AMOVW, &bl, &al)
+
+ p1.Scond = arm.C_SCOND_EQ
+ p1 = gins(arm.AMOVW, &bh, &ah)
+ p1.Scond = arm.C_SCOND_EQ
+ p2 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+ // shift is < 32
+ gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
+
+ gmove(&n1, &creg)
+ gcmp(arm.ACMP, &s, &creg)
+
+ // MOVW.LO bl<<s, al
+ p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // MOVW.LO bh<<s, ah
+ p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LL, &s, &ah)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // SUB.LO s, creg
+ p1 = gins(arm.ASUB, &s, &creg)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // OR.LO bl>>creg, ah
+ p1 = gregshift(arm.AORR, &bl, arm.SHIFT_LR, &creg, &ah)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // BLO end
+ p3 = gc.Gbranch(arm.ABLO, nil, 0)
+
+ // shift == 32
+ p1 = gins(arm.AEOR, &al, &al)
+
+ p1.Scond = arm.C_SCOND_EQ
+ p1 = gins(arm.AMOVW, &bl, &ah)
+ p1.Scond = arm.C_SCOND_EQ
+ p4 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+ // shift is < 64
+ gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
+
+ gmove(&n1, &creg)
+ gcmp(arm.ACMP, &s, &creg)
+
+ // EOR.LO al, al
+ p1 = gins(arm.AEOR, &al, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // MOVW.LO creg>>1, creg
+ p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // SUB.LO creg, s
+ p1 = gins(arm.ASUB, &creg, &s)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // MOVW bl<<s, ah
+ p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &ah)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ p5 = gc.Gbranch(arm.ABLO, nil, 0)
+
+ // shift >= 64
+ if p6 != nil {
+ gc.Patch(p6, gc.Pc)
+ }
+ gins(arm.AEOR, &al, &al)
+ gins(arm.AEOR, &ah, &ah)
+
+ gc.Patch(p2, gc.Pc)
+ gc.Patch(p3, gc.Pc)
+ gc.Patch(p4, gc.Pc)
+ gc.Patch(p5, gc.Pc)
+ regfree(&s)
+ regfree(&creg)
+
+ olsh_break:
+ regfree(&bl)
+ regfree(&bh)
+
+ case gc.ORSH:
+ regalloc(&bl, lo1.Type, nil)
+ regalloc(&bh, hi1.Type, nil)
+ gins(arm.AMOVW, &hi1, &bh)
+ gins(arm.AMOVW, &lo1, &bl)
+
+ if r.Op == gc.OLITERAL {
+ v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ if v >= 64 {
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->31, al
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
+
+ // MOVW bh->31, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+ } else {
+ gins(arm.AEOR, &al, &al)
+ gins(arm.AEOR, &ah, &ah)
+ }
+ } else if v > 32 {
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->(v-32), al
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v-32), &al)
+
+ // MOVW bh->31, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+ } else {
+ // MOVW bh>>(v-32), al
+ gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v-32), &al)
+
+ gins(arm.AEOR, &ah, &ah)
+ }
+ } else if v == 32 {
+ gins(arm.AMOVW, &bh, &al)
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->31, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+ } else {
+ gins(arm.AEOR, &ah, &ah)
+ }
+ } else if v > 0 {
+ // MOVW bl>>v, al
+ gshift(arm.AMOVW, &bl, arm.SHIFT_LR, int32(v), &al)
+
+ // OR bh<<(32-v), al
+ gshift(arm.AORR, &bh, arm.SHIFT_LL, int32(32-v), &al)
+
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->v, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v), &ah)
+ } else {
+ // MOVW bh>>v, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v), &ah)
+ }
+ } else {
+ gins(arm.AMOVW, &bl, &al)
+ gins(arm.AMOVW, &bh, &ah)
+ }
+
+ goto orsh_break
+ }
+
+ regalloc(&s, gc.Types[gc.TUINT32], nil)
+ regalloc(&creg, gc.Types[gc.TUINT32], nil)
+ if gc.Is64(r.Type) != 0 {
+ // shift is >= 1<<32
+ split64(r, &cl, &ch)
+
+ gmove(&ch, &s)
+ gins(arm.ATST, &s, nil)
+ if bh.Type.Etype == gc.TINT32 {
+ p1 = gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+ } else {
+ p1 = gins(arm.AEOR, &ah, &ah)
+ }
+ p1.Scond = arm.C_SCOND_NE
+ p6 = gc.Gbranch(arm.ABNE, nil, 0)
+ gmove(&cl, &s)
+ splitclean()
+ } else {
+ gmove(r, &s)
+ p6 = nil
+ }
+
+ gins(arm.ATST, &s, nil)
+
+ // shift == 0
+ p1 = gins(arm.AMOVW, &bl, &al)
+
+ p1.Scond = arm.C_SCOND_EQ
+ p1 = gins(arm.AMOVW, &bh, &ah)
+ p1.Scond = arm.C_SCOND_EQ
+ p2 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+ // check if shift is < 32
+ gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
+
+ gmove(&n1, &creg)
+ gcmp(arm.ACMP, &s, &creg)
+
+ // MOVW.LO bl>>s, al
+ p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LR, &s, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // SUB.LO s,creg
+ p1 = gins(arm.ASUB, &s, &creg)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // OR.LO bh<<(32-s), al
+ p1 = gregshift(arm.AORR, &bh, arm.SHIFT_LL, &creg, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->s, ah
+ p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &ah)
+ } else {
+ // MOVW bh>>s, ah
+ p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &ah)
+ }
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // BLO end
+ p3 = gc.Gbranch(arm.ABLO, nil, 0)
+
+ // shift == 32
+ p1 = gins(arm.AMOVW, &bh, &al)
+
+ p1.Scond = arm.C_SCOND_EQ
+ if bh.Type.Etype == gc.TINT32 {
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+ } else {
+ gins(arm.AEOR, &ah, &ah)
+ }
+ p4 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+ // check if shift is < 64
+ gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
+
+ gmove(&n1, &creg)
+ gcmp(arm.ACMP, &s, &creg)
+
+ // MOVW.LO creg>>1, creg
+ p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // SUB.LO creg, s
+ p1 = gins(arm.ASUB, &creg, &s)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->(s-32), al
+ p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+ } else {
+ // MOVW bh>>(s-32), al
+ p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+ }
+
+ // BLO end
+ p5 = gc.Gbranch(arm.ABLO, nil, 0)
+
+ // s >= 64
+ if p6 != nil {
+ gc.Patch(p6, gc.Pc)
+ }
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->31, al
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
+ } else {
+ gins(arm.AEOR, &al, &al)
+ }
+
+ gc.Patch(p2, gc.Pc)
+ gc.Patch(p3, gc.Pc)
+ gc.Patch(p4, gc.Pc)
+ gc.Patch(p5, gc.Pc)
+ regfree(&s)
+ regfree(&creg)
+
+ orsh_break:
+ regfree(&bl)
+ regfree(&bh)
+
+ // TODO(kaib): literal optimizations
+ // make constant the right side (it usually is anyway).
+ // if(lo1.op == OLITERAL) {
+ // nswap(&lo1, &lo2);
+ // nswap(&hi1, &hi2);
+ // }
+ // if(lo2.op == OLITERAL) {
+ // // special cases for constants.
+ // lv = mpgetfix(lo2.val.u.xval);
+ // hv = mpgetfix(hi2.val.u.xval);
+ // splitclean(); // right side
+ // split64(res, &lo2, &hi2);
+ // switch(n->op) {
+ // case OXOR:
+ // gmove(&lo1, &lo2);
+ // gmove(&hi1, &hi2);
+ // switch(lv) {
+ // case 0:
+ // break;
+ // case 0xffffffffu:
+ // gins(ANOTL, N, &lo2);
+ // break;
+ // default:
+ // gins(AXORL, ncon(lv), &lo2);
+ // break;
+ // }
+ // switch(hv) {
+ // case 0:
+ // break;
+ // case 0xffffffffu:
+ // gins(ANOTL, N, &hi2);
+ // break;
+ // default:
+ // gins(AXORL, ncon(hv), &hi2);
+ // break;
+ // }
+ // break;
+
+ // case OAND:
+ // switch(lv) {
+ // case 0:
+ // gins(AMOVL, ncon(0), &lo2);
+ // break;
+ // default:
+ // gmove(&lo1, &lo2);
+ // if(lv != 0xffffffffu)
+ // gins(AANDL, ncon(lv), &lo2);
+ // break;
+ // }
+ // switch(hv) {
+ // case 0:
+ // gins(AMOVL, ncon(0), &hi2);
+ // break;
+ // default:
+ // gmove(&hi1, &hi2);
+ // if(hv != 0xffffffffu)
+ // gins(AANDL, ncon(hv), &hi2);
+ // break;
+ // }
+ // break;
+
+ // case OOR:
+ // switch(lv) {
+ // case 0:
+ // gmove(&lo1, &lo2);
+ // break;
+ // case 0xffffffffu:
+ // gins(AMOVL, ncon(0xffffffffu), &lo2);
+ // break;
+ // default:
+ // gmove(&lo1, &lo2);
+ // gins(AORL, ncon(lv), &lo2);
+ // break;
+ // }
+ // switch(hv) {
+ // case 0:
+ // gmove(&hi1, &hi2);
+ // break;
+ // case 0xffffffffu:
+ // gins(AMOVL, ncon(0xffffffffu), &hi2);
+ // break;
+ // default:
+ // gmove(&hi1, &hi2);
+ // gins(AORL, ncon(hv), &hi2);
+ // break;
+ // }
+ // break;
+ // }
+ // splitclean();
+ // splitclean();
+ // goto out;
+ // }
+ case gc.OXOR,
+ gc.OAND,
+ gc.OOR:
+ regalloc(&n1, lo1.Type, nil)
+
+ gins(arm.AMOVW, &lo1, &al)
+ gins(arm.AMOVW, &hi1, &ah)
+ gins(arm.AMOVW, &lo2, &n1)
+ gins(optoas(int(n.Op), lo1.Type), &n1, &al)
+ gins(arm.AMOVW, &hi2, &n1)
+ gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
+ regfree(&n1)
+ }
+
+ if gc.Is64(r.Type) != 0 {
+ splitclean()
+ }
+ splitclean()
+
+ split64(res, &lo1, &hi1)
+ gins(arm.AMOVW, &al, &lo1)
+ gins(arm.AMOVW, &ah, &hi1)
+ splitclean()
+
+ //out:
+ regfree(&al)
+
+ regfree(&ah)
+}
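+
+// The OMINUS case above relies on the usual two-word negation
+// identity (sketch): the low word is subtracted from zero with the
+// S bit set so the borrow is recorded, and the high word consumes it:
+//
+// lo' = 0 - lo // SUB.S
+// hi' = 0 - hi - borrow // SBC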
+
+/*
+ * generate comparison of nl, nr, both 64-bit.
+ * nl is memory; nr is constant or memory.
+ */
+func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
+ var lo1 gc.Node
+ var hi1 gc.Node
+ var lo2 gc.Node
+ var hi2 gc.Node
+ var r1 gc.Node
+ var r2 gc.Node
+ var br *obj.Prog
+ var t *gc.Type
+
+ split64(nl, &lo1, &hi1)
+ split64(nr, &lo2, &hi2)
+
+ // compare most significant word;
+ // if they differ, we're done.
+ t = hi1.Type
+
+ regalloc(&r1, gc.Types[gc.TINT32], nil)
+ regalloc(&r2, gc.Types[gc.TINT32], nil)
+ gins(arm.AMOVW, &hi1, &r1)
+ gins(arm.AMOVW, &hi2, &r2)
+ gcmp(arm.ACMP, &r1, &r2)
+ regfree(&r1)
+ regfree(&r2)
+
+ br = nil
+ switch op {
+ default:
+ gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+ fallthrough
+
+ // cmp hi
+ // bne L
+ // cmp lo
+ // beq to
+ // L:
+ case gc.OEQ:
+ br = gc.Gbranch(arm.ABNE, nil, -likely)
+
+ // cmp hi
+ // bne to
+ // cmp lo
+ // bne to
+ case gc.ONE:
+ gc.Patch(gc.Gbranch(arm.ABNE, nil, likely), to)
+
+ // cmp hi
+ // bgt to
+ // blt L
+ // cmp lo
+ // bge to (or bgt to)
+ // L:
+ case gc.OGE,
+ gc.OGT:
+ gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
+
+ br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)
+
+ // cmp hi
+ // blt to
+ // bgt L
+ // cmp lo
+ // ble to (or blt to)
+ // L:
+ case gc.OLE,
+ gc.OLT:
+ gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
+
+ br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
+ }
+
+ // compare least significant word
+ t = lo1.Type
+
+ regalloc(&r1, gc.Types[gc.TINT32], nil)
+ regalloc(&r2, gc.Types[gc.TINT32], nil)
+ gins(arm.AMOVW, &lo1, &r1)
+ gins(arm.AMOVW, &lo2, &r2)
+ gcmp(arm.ACMP, &r1, &r2)
+ regfree(&r1)
+ regfree(&r2)
+
+ // jump again
+ gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)
+
+ // point first branch down here if appropriate
+ if br != nil {
+ gc.Patch(br, gc.Pc)
+ }
+
+ splitclean()
+ splitclean()
+}
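+
+// A plain-Go model of the comparison strategy above (illustrative
+// only; lt64 is a hypothetical helper): the high words decide the
+// result unless they are equal, in which case the unsigned low words
+// break the tie. For signed 64-bit less-than:
+//
+// func lt64(hi1 int32, lo1 uint32, hi2 int32, lo2 uint32) bool {
+// if hi1 != hi2 {
+// return hi1 < hi2
+// }
+// return lo1 < lo2
+// }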
diff --git a/src/cmd/new5g/galign.go b/src/cmd/new5g/galign.go
new file mode 100644
index 0000000000..95f11ec7dc
--- /dev/null
+++ b/src/cmd/new5g/galign.go
@@ -0,0 +1,84 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+
+var thechar int = '5'
+
+var thestring string = "arm"
+
+var thelinkarch *obj.LinkArch = &arm.Linkarm
+
+func linkarchinit() {
+}
+
+var MAXWIDTH int64 = (1 << 32) - 1
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, and uintptr
+ */
+var typedefs = []gc.Typedef{
+ gc.Typedef{"int", gc.TINT, gc.TINT32},
+ gc.Typedef{"uint", gc.TUINT, gc.TUINT32},
+ gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT32},
+}
+
+func betypeinit() {
+ gc.Widthptr = 4
+ gc.Widthint = 4
+ gc.Widthreg = 4
+}
+
+func main() {
+ gc.Thearch.Thechar = thechar
+ gc.Thearch.Thestring = thestring
+ gc.Thearch.Thelinkarch = thelinkarch
+ gc.Thearch.Typedefs = typedefs
+ gc.Thearch.REGSP = arm.REGSP
+ gc.Thearch.REGCTXT = arm.REGCTXT
+ gc.Thearch.MAXWIDTH = MAXWIDTH
+ gc.Thearch.Anyregalloc = anyregalloc
+ gc.Thearch.Betypeinit = betypeinit
+ gc.Thearch.Bgen = bgen
+ gc.Thearch.Cgen = cgen
+ gc.Thearch.Cgen_call = cgen_call
+ gc.Thearch.Cgen_callinter = cgen_callinter
+ gc.Thearch.Cgen_ret = cgen_ret
+ gc.Thearch.Clearfat = clearfat
+ gc.Thearch.Defframe = defframe
+ gc.Thearch.Excise = excise
+ gc.Thearch.Expandchecks = expandchecks
+ gc.Thearch.Gclean = gclean
+ gc.Thearch.Ginit = ginit
+ gc.Thearch.Gins = gins
+ gc.Thearch.Ginscall = ginscall
+ gc.Thearch.Igen = igen
+ gc.Thearch.Linkarchinit = linkarchinit
+ gc.Thearch.Peep = peep
+ gc.Thearch.Proginfo = proginfo
+ gc.Thearch.Regalloc = regalloc
+ gc.Thearch.Regfree = regfree
+ gc.Thearch.Regtyp = regtyp
+ gc.Thearch.Sameaddr = sameaddr
+ gc.Thearch.Smallindir = smallindir
+ gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Excludedregs = excludedregs
+ gc.Thearch.RtoB = RtoB
+ gc.Thearch.FtoB = RtoB
+ gc.Thearch.BtoR = BtoR
+ gc.Thearch.BtoF = BtoF
+ gc.Thearch.Optoas = optoas
+ gc.Thearch.Doregbits = doregbits
+ gc.Thearch.Regnames = regnames
+
+ gc.Main()
+}
diff --git a/src/cmd/new5g/gg.go b/src/cmd/new5g/gg.go
new file mode 100644
index 0000000000..7a7fb3b774
--- /dev/null
+++ b/src/cmd/new5g/gg.go
@@ -0,0 +1,32 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "cmd/internal/obj/arm"
+
+const (
+ REGALLOC_R0 = arm.REG_R0
+ REGALLOC_RMAX = arm.REGEXT
+ REGALLOC_F0 = arm.REG_F0
+ REGALLOC_FMAX = arm.FREGEXT
+)
+
+var reg [REGALLOC_FMAX + 1]uint8
+
+/*
+ * cgen
+ */
+
+/*
+ * list.c
+ */
+
+/*
+ * reg.c
+ */
diff --git a/src/cmd/new5g/ggen.go b/src/cmd/new5g/ggen.go
new file mode 100644
index 0000000000..d81f405203
--- /dev/null
+++ b/src/cmd/new5g/ggen.go
@@ -0,0 +1,822 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+
+func defframe(ptxt *obj.Prog) {
+ var frame uint32
+ var r0 uint32
+ var p *obj.Prog
+ var hi int64
+ var lo int64
+ var l *gc.NodeList
+ var n *gc.Node
+
+ // fill in argument size, stack size
+ ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+ ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+ frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+ ptxt.To.Offset = int64(frame)
+
+ // insert code to contain ambiguously live variables
+ // so that garbage collector only sees initialized values
+ // when it looks for pointers.
+ p = ptxt
+
+ hi = 0
+ lo = hi
+ r0 = 0
+ for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if !(n.Needzero != 0) {
+ continue
+ }
+ if n.Class != gc.PAUTO {
+ gc.Fatal("needzero class %d", n.Class)
+ }
+ if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+ gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ }
+ if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
+ // merge with range we already have
+ lo = gc.Rnd(n.Xoffset, int64(gc.Widthptr))
+
+ continue
+ }
+
+ // zero old range
+ p = zerorange(p, int64(frame), lo, hi, &r0)
+
+ // set new range
+ hi = n.Xoffset + n.Type.Width
+
+ lo = n.Xoffset
+ }
+
+ // zero final range
+ zerorange(p, int64(frame), lo, hi, &r0)
+}
+
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
+ var cnt int64
+ var i int64
+ var p1 *obj.Prog
+ var f *gc.Node
+
+ cnt = hi - lo
+ if cnt == 0 {
+ return p
+ }
+ if *r0 == 0 {
+ p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
+ *r0 = 1
+ }
+
+ if cnt < int64(4*gc.Widthptr) {
+ for i = 0; i < cnt; i += int64(gc.Widthptr) {
+ p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
+ }
+ } else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
+ p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
+ p.Reg = arm.REGSP
+ p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ f = gc.Sysfunc("duffzero")
+ gc.Naddr(f, &p.To, 1)
+ gc.Afunclit(&p.To, f)
+ p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+ } else {
+ p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
+ p.Reg = arm.REGSP
+ p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
+ p.Reg = arm.REG_R1
+ p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
+ p1 = p
+ p.Scond |= arm.C_PBIT
+ p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
+ p.Reg = arm.REG_R2
+ p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ gc.Patch(p, p1)
+ }
+
+ return p
+}
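+
+// zerorange picks one of three strategies by size (sketch): below
+// four words it emits unrolled MOVW stores; up to 128 words (and not
+// on NaCl) it jumps into duffzero, where the offset 4*(128-n) skips
+// all but the last n of its 128 store slots; beyond that it emits an
+// explicit store/compare/branch loop.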
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int, treg int, toffset int32) *obj.Prog {
+ var q *obj.Prog
+
+ q = gc.Ctxt.NewProg()
+ gc.Clearp(q)
+ q.As = int16(as)
+ q.Lineno = p.Lineno
+ q.From.Type = int16(ftype)
+ q.From.Reg = int16(freg)
+ q.From.Offset = int64(foffset)
+ q.To.Type = int16(ttype)
+ q.To.Reg = int16(treg)
+ q.To.Offset = int64(toffset)
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
+
+/*
+ * generate:
+ * call f
+ * proc=-1 normal call but no return
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ * proc=3 normal call to C pointer (not Go func value)
+ */
+func ginscall(f *gc.Node, proc int) {
+ var p *obj.Prog
+ var r gc.Node
+ var r1 gc.Node
+ var con gc.Node
+ var extra int32
+
+ if f.Type != nil {
+ extra = 0
+ if proc == 1 || proc == 2 {
+ extra = 2 * int32(gc.Widthptr)
+ }
+ gc.Setmaxarg(f.Type, extra)
+ }
+
+ switch proc {
+ default:
+ gc.Fatal("ginscall: bad proc %d", proc)
+
+ case 0, // normal call
+ -1: // normal call but no return
+ if f.Op == gc.ONAME && f.Class == gc.PFUNC {
+ if f == gc.Deferreturn {
+ // Deferred calls will appear to be returning to
+ // the BL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction before that return PC.
+ // To keep that instruction from being an unrelated one,
+ // insert a NOP so that we will have the right line number.
+ // ARM NOP 0x00000000 is really AND.EQ R0, R0, R0.
+ // Use the latter form because the NOP pseudo-instruction
+ // would be removed by the linker.
+ gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
+
+ p = gins(arm.AAND, &r, &r)
+ p.Scond = arm.C_SCOND_EQ
+ }
+
+ p = gins(arm.ABL, nil, f)
+ gc.Afunclit(&p.To, f)
+ if proc == -1 || gc.Noreturn(p) != 0 {
+ gins(obj.AUNDEF, nil, nil)
+ }
+ break
+ }
+
+ gc.Nodreg(&r, gc.Types[gc.Tptr], arm.REG_R7)
+ gc.Nodreg(&r1, gc.Types[gc.Tptr], arm.REG_R1)
+ gmove(f, &r)
+ r.Op = gc.OINDREG
+ gmove(&r, &r1)
+ r.Op = gc.OREGISTER
+ r1.Op = gc.OINDREG
+ gins(arm.ABL, &r, &r1)
+
+ case 3: // normal call of c function pointer
+ gins(arm.ABL, nil, f)
+
+ case 1, // call in new proc (go)
+ 2: // deferred call (defer)
+ regalloc(&r, gc.Types[gc.Tptr], nil)
+
+ gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
+ gins(arm.AMOVW, &con, &r)
+ p = gins(arm.AMOVW, &r, nil)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm.REGSP
+ p.To.Offset = 4
+
+ gins(arm.AMOVW, f, &r)
+ p = gins(arm.AMOVW, &r, nil)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm.REGSP
+ p.To.Offset = 8
+
+ regfree(&r)
+
+ if proc == 1 {
+ ginscall(gc.Newproc, 0)
+ } else {
+ ginscall(gc.Deferproc, 0)
+ }
+
+ if proc == 2 {
+ gc.Nodconst(&con, gc.Types[gc.TINT32], 0)
+ p = gins(arm.ACMP, &con, nil)
+ p.Reg = arm.REG_R0
+ p = gc.Gbranch(arm.ABEQ, nil, +1)
+ cgen_ret(nil)
+ gc.Patch(p, gc.Pc)
+ }
+ }
+}
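+
+// How the proc values map to source constructs (sketch):
+//
+// f() // proc 0 or -1: BL f(SB) directly
+// go f() // proc 1: arg size and fn at 4(SP)/8(SP), then Newproc
+// defer f() // proc 2: same setup, then Deferproc, returning
+// // early if Deferproc's result is nonzero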
+
+/*
+ * n is call to interface method.
+ * generate res = n.
+ */
+func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
+ var r int
+ var i *gc.Node
+ var f *gc.Node
+ var tmpi gc.Node
+ var nodo gc.Node
+ var nodr gc.Node
+ var nodsp gc.Node
+ var p *obj.Prog
+
+ i = n.Left
+ if i.Op != gc.ODOTINTER {
+ gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
+ }
+
+ f = i.Right // field
+ if f.Op != gc.ONAME {
+ gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
+ }
+
+ i = i.Left // interface
+
+ // Release res register during genlist and cgen,
+ // which might have their own function calls.
+ r = -1
+
+ if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
+ r = int(res.Val.U.Reg)
+ reg[r]--
+ }
+
+ if !(i.Addable != 0) {
+ gc.Tempname(&tmpi, i.Type)
+ cgen(i, &tmpi)
+ i = &tmpi
+ }
+
+ gc.Genlist(n.List) // args
+ if r >= 0 {
+ reg[r]++
+ }
+
+ regalloc(&nodr, gc.Types[gc.Tptr], res)
+ regalloc(&nodo, gc.Types[gc.Tptr], &nodr)
+ nodo.Op = gc.OINDREG
+
+ agen(i, &nodr) // REG = &inter
+
+ gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], arm.REGSP)
+
+ nodsp.Xoffset = int64(gc.Widthptr)
+ if proc != 0 {
+ nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
+ }
+ nodo.Xoffset += int64(gc.Widthptr)
+ cgen(&nodo, &nodsp) // {4 or 12}(SP) = 4(REG) -- i.data
+
+ nodo.Xoffset -= int64(gc.Widthptr)
+
+ cgen(&nodo, &nodr) // REG = 0(REG) -- i.tab
+ gc.Cgen_checknil(&nodr) // in case offset is huge
+
+ nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
+
+ if proc == 0 {
+ // plain call: use direct c function pointer - more efficient
+ cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f]
+ nodr.Op = gc.OINDREG
+ proc = 3
+ } else {
+ // go/defer. generate go func value.
+ p = gins(arm.AMOVW, &nodo, &nodr)
+
+ p.From.Type = obj.TYPE_ADDR // REG = &(20+offset(REG)) -- i.tab->fun[f]
+ }
+
+ nodr.Type = n.Left.Type
+ ginscall(&nodr, proc)
+
+ regfree(&nodr)
+ regfree(&nodo)
+}
+
+/*
+ * generate function call;
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ */
+func cgen_call(n *gc.Node, proc int) {
+ var t *gc.Type
+ var nod gc.Node
+ var afun gc.Node
+
+ if n == nil {
+ return
+ }
+
+ if n.Left.Ullman >= gc.UINF {
+ // if name involves a fn call
+ // precompute the address of the fn
+ gc.Tempname(&afun, gc.Types[gc.Tptr])
+
+ cgen(n.Left, &afun)
+ }
+
+ gc.Genlist(n.List) // assign the args
+ t = n.Left.Type
+
+ // call tempname pointer
+ if n.Left.Ullman >= gc.UINF {
+ regalloc(&nod, gc.Types[gc.Tptr], nil)
+ gc.Cgen_as(&nod, &afun)
+ nod.Type = t
+ ginscall(&nod, proc)
+ regfree(&nod)
+ goto ret
+ }
+
+ // call pointer
+ if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+ regalloc(&nod, gc.Types[gc.Tptr], nil)
+ gc.Cgen_as(&nod, n.Left)
+ nod.Type = t
+ ginscall(&nod, proc)
+ regfree(&nod)
+ goto ret
+ }
+
+ // call direct
+ n.Left.Method = 1
+
+ ginscall(n.Left, proc)
+
+ret:
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = return value from call.
+ */
+func cgen_callret(n *gc.Node, res *gc.Node) {
+ var nod gc.Node
+ var fp *gc.Type
+ var t *gc.Type
+ var flist gc.Iter
+
+ t = n.Left.Type
+ if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
+ t = t.Type
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if fp == nil {
+ gc.Fatal("cgen_callret: nil")
+ }
+
+ nod = gc.Node{}
+ nod.Op = gc.OINDREG
+ nod.Val.U.Reg = arm.REGSP
+ nod.Addable = 1
+
+ nod.Xoffset = fp.Width + 4 // +4: saved lr at 0(SP)
+ nod.Type = fp.Type
+ gc.Cgen_as(res, &nod)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = &return value from call.
+ */
+func cgen_aret(n *gc.Node, res *gc.Node) {
+ var nod1 gc.Node
+ var nod2 gc.Node
+ var fp *gc.Type
+ var t *gc.Type
+ var flist gc.Iter
+
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if fp == nil {
+ gc.Fatal("cgen_aret: nil")
+ }
+
+ nod1 = gc.Node{}
+ nod1.Op = gc.OINDREG
+ nod1.Val.U.Reg = arm.REGSP
+ nod1.Addable = 1
+
+ nod1.Xoffset = fp.Width + 4 // +4: saved lr at 0(SP)
+ nod1.Type = fp.Type
+
+ if res.Op != gc.OREGISTER {
+ regalloc(&nod2, gc.Types[gc.Tptr], res)
+ agen(&nod1, &nod2)
+ gins(arm.AMOVW, &nod2, res)
+ regfree(&nod2)
+ } else {
+ agen(&nod1, res)
+ }
+}
+
+/*
+ * generate return.
+ * n->left is assignments to return values.
+ */
+func cgen_ret(n *gc.Node) {
+ var p *obj.Prog
+
+ if n != nil {
+ gc.Genlist(n.List) // copy out args
+ }
+ if gc.Hasdefer != 0 {
+ ginscall(gc.Deferreturn, 0)
+ }
+ gc.Genlist(gc.Curfn.Exit)
+ p = gins(obj.ARET, nil, nil)
+ if n != nil && n.Op == gc.ORETJMP {
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Linksym(n.Left.Sym)
+ }
+}
+
+/*
+ * generate high multiply
+ * res = (nl * nr) >> wordsize
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var w int
+ var n1 gc.Node
+ var n2 gc.Node
+ var tmp *gc.Node
+ var t *gc.Type
+ var p *obj.Prog
+
+ if nl.Ullman < nr.Ullman {
+ tmp = nl
+ nl = nr
+ nr = tmp
+ }
+
+ t = nl.Type
+ w = int(t.Width * 8)
+ regalloc(&n1, t, res)
+ cgen(nl, &n1)
+ regalloc(&n2, t, nil)
+ cgen(nr, &n2)
+ switch gc.Simtype[t.Etype] {
+ case gc.TINT8,
+ gc.TINT16:
+ gins(optoas(gc.OMUL, t), &n2, &n1)
+ gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
+
+ case gc.TUINT8,
+ gc.TUINT16:
+ gins(optoas(gc.OMUL, t), &n2, &n1)
+ gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)
+
+ // perform a long multiplication.
+ case gc.TINT32,
+ gc.TUINT32:
+ if gc.Issigned[t.Etype] != 0 {
+ p = gins(arm.AMULL, &n2, nil)
+ } else {
+ p = gins(arm.AMULLU, &n2, nil)
+ }
+
+ // n2 * n1 -> (n1 n2)
+ p.Reg = n1.Val.U.Reg
+
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = n1.Val.U.Reg
+ p.To.Offset = int64(n2.Val.U.Reg)
+
+ default:
+ gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
+ }
+
+ cgen(&n1, res)
+ regfree(&n1)
+ regfree(&n2)
+}
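+
+// The value cgen_hmul computes, written out for 32-bit signed
+// operands (illustrative only; hmul32 is a hypothetical helper):
+//
+// func hmul32(a, b int32) int32 {
+// return int32((int64(a) * int64(b)) >> 32)
+// }
+//
+// The 8- and 16-bit cases get the same effect from a plain multiply
+// followed by a shift by the operand width, arithmetic or logical
+// according to signedness.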
+
+/*
+ * generate shift according to op, one of:
+ * res = nl << nr
+ * res = nl >> nr
+ */
+func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var nt gc.Node
+ var t gc.Node
+ var lo gc.Node
+ var hi gc.Node
+ var w int
+ var v int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var tr *gc.Type
+ var sc uint64
+
+ if nl.Type.Width > 4 {
+ gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
+ }
+
+ w = int(nl.Type.Width * 8)
+
+ if op == gc.OLROT {
+ v = int(gc.Mpgetfix(nr.Val.U.Xval))
+ regalloc(&n1, nl.Type, res)
+ if w == 32 {
+ cgen(nl, &n1)
+ gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
+ } else {
+ regalloc(&n2, nl.Type, nil)
+ cgen(nl, &n2)
+ gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
+ gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
+ regfree(&n2)
+
+ // Ensure sign/zero-extended result.
+ gins(optoas(gc.OAS, nl.Type), &n1, &n1)
+ }
+
+ gmove(&n1, res)
+ regfree(&n1)
+ return
+ }
+
+ if nr.Op == gc.OLITERAL {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+ sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ if sc == 0 {
+ // nothing to do
+ } else if sc >= uint64(nl.Type.Width*8) {
+ if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
+ gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
+ } else {
+ gins(arm.AEOR, &n1, &n1)
+ }
+ } else {
+ if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
+ gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
+ } else if op == gc.ORSH {
+ gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
+ } else {
+ // OLSH
+ gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
+ }
+ }
+
+ if w < 32 && op == gc.OLSH {
+ gins(optoas(gc.OAS, nl.Type), &n1, &n1)
+ }
+ gmove(&n1, res)
+ regfree(&n1)
+ return
+ }
+
+ tr = nr.Type
+ if tr.Width > 4 {
+ gc.Tempname(&nt, nr.Type)
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n2, nl.Type, res)
+ cgen(nl, &n2)
+ cgen(nr, &nt)
+ n1 = nt
+ } else {
+ cgen(nr, &nt)
+ regalloc(&n2, nl.Type, res)
+ cgen(nl, &n2)
+ }
+
+ split64(&nt, &lo, &hi)
+ regalloc(&n1, gc.Types[gc.TUINT32], nil)
+ regalloc(&n3, gc.Types[gc.TUINT32], nil)
+ gmove(&lo, &n1)
+ gmove(&hi, &n3)
+ splitclean()
+ gins(arm.ATST, &n3, nil)
+ gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
+ p1 = gins(arm.AMOVW, &t, &n1)
+ p1.Scond = arm.C_SCOND_NE
+ tr = gc.Types[gc.TUINT32]
+ regfree(&n3)
+ } else {
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n2, nl.Type, res)
+ cgen(nl, &n2)
+ regalloc(&n1, nr.Type, nil)
+ cgen(nr, &n1)
+ } else {
+ regalloc(&n1, nr.Type, nil)
+ cgen(nr, &n1)
+ regalloc(&n2, nl.Type, res)
+ cgen(nl, &n2)
+ }
+ }
+
+ // test for shift being 0
+ gins(arm.ATST, &n1, nil)
+
+ p3 = gc.Gbranch(arm.ABEQ, nil, -1)
+
+ // test and fix up large shifts
+ // TODO: if(!bounded), don't emit some of this.
+ regalloc(&n3, tr, nil)
+
+ gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
+ gmove(&t, &n3)
+ gcmp(arm.ACMP, &n1, &n3)
+ if op == gc.ORSH {
+ if gc.Issigned[nl.Type.Etype] != 0 {
+ p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
+ p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
+ } else {
+ p1 = gins(arm.AEOR, &n2, &n2)
+ p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
+ }
+
+ p1.Scond = arm.C_SCOND_HS
+ p2.Scond = arm.C_SCOND_LO
+ } else {
+ p1 = gins(arm.AEOR, &n2, &n2)
+ p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
+ p1.Scond = arm.C_SCOND_HS
+ p2.Scond = arm.C_SCOND_LO
+ }
+
+ regfree(&n3)
+
+ gc.Patch(p3, gc.Pc)
+
+ // Left-shift of smaller word must be sign/zero-extended.
+ if w < 32 && op == gc.OLSH {
+ gins(optoas(gc.OAS, nl.Type), &n2, &n2)
+ }
+ gmove(&n2, res)
+
+ regfree(&n1)
+ regfree(&n2)
+}
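+
+// The oversized-shift behavior cgen_shift enforces, as a plain-Go
+// sketch (illustrative only; sar32 is a hypothetical helper): a
+// count of at least the operand width yields zero, except for signed
+// right shifts, which fill with the sign bit. The "test and fix up
+// large shifts" code above makes the machine shift match this:
+//
+// func sar32(x int32, s uint) int32 {
+// if s >= 32 {
+// s = 31 // saturate: all sign bits
+// }
+// return x >> s
+// }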
+
+func clearfat(nl *gc.Node) {
+ var w uint32
+ var c uint32
+ var q uint32
+ var dst gc.Node
+ var nc gc.Node
+ var nz gc.Node
+ var end gc.Node
+ var r0 gc.Node
+ var r1 gc.Node
+ var f *gc.Node
+ var p *obj.Prog
+ var pl *obj.Prog
+
+ /* clear a fat object */
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nclearfat", nl)
+ }
+
+ w = uint32(nl.Type.Width)
+
+ // Avoid taking the address for simple enough types.
+ if componentgen(nil, nl) != 0 {
+ return
+ }
+
+ c = w % 4 // bytes
+ q = w / 4 // quads
+
+ r0.Op = gc.OREGISTER
+
+ r0.Val.U.Reg = REGALLOC_R0
+ r1.Op = gc.OREGISTER
+ r1.Val.U.Reg = REGALLOC_R0 + 1
+ regalloc(&dst, gc.Types[gc.Tptr], &r1)
+ agen(nl, &dst)
+ gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
+ regalloc(&nz, gc.Types[gc.TUINT32], &r0)
+ cgen(&nc, &nz)
+
+ if q > 128 {
+ regalloc(&end, gc.Types[gc.Tptr], nil)
+ p = gins(arm.AMOVW, &dst, &end)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = int64(q) * 4
+
+ p = gins(arm.AMOVW, &nz, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 4
+ p.Scond |= arm.C_PBIT
+ pl = p
+
+ p = gins(arm.ACMP, &dst, nil)
+ raddr(&end, p)
+ gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
+
+ regfree(&end)
+ } else if q >= 4 && !gc.Nacl {
+ f = gc.Sysfunc("duffzero")
+ p = gins(obj.ADUFFZERO, nil, f)
+ gc.Afunclit(&p.To, f)
+
+ // 4 and 128 = magic constants: see ../../runtime/asm_arm.s
+ p.To.Offset = 4 * (128 - int64(q))
+ } else {
+ for q > 0 {
+ p = gins(arm.AMOVW, &nz, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 4
+ p.Scond |= arm.C_PBIT
+
+ //print("1. %P\n", p);
+ q--
+ }
+ }
+
+ for c > 0 {
+ p = gins(arm.AMOVB, &nz, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 1
+ p.Scond |= arm.C_PBIT
+
+ //print("2. %P\n", p);
+ c--
+ }
+
+ regfree(&dst)
+ regfree(&nz)
+}
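+
+// clearfat's size split, schematically: a width w is cleared as
+// q = w/4 word stores plus c = w%4 trailing byte stores. The word
+// stores are emitted as a store/compare/branch loop above 128 words,
+// through duffzero for 4..128 words (when not on NaCl), and unrolled
+// otherwise.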
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+ var reg int
+ var p *obj.Prog
+ var p1 *obj.Prog
+
+ for p = firstp; p != nil; p = p.Link {
+ if p.As != obj.ACHECKNIL {
+ continue
+ }
+ if gc.Debug_checknil != 0 && p.Lineno > 1 { // p.Lineno==1 in generated wrappers
+ gc.Warnl(int(p.Lineno), "generated nil check")
+ }
+ if p.From.Type != obj.TYPE_REG {
+ gc.Fatal("invalid nil check %v", p)
+ }
+ reg = int(p.From.Reg)
+
+ // check is
+ // CMP arg, $0
+ // MOV.EQ arg, 0(arg)
+ p1 = gc.Ctxt.NewProg()
+
+ gc.Clearp(p1)
+ p1.Link = p.Link
+ p.Link = p1
+ p1.Lineno = p.Lineno
+ p1.Pc = 9999
+ p1.As = arm.AMOVW
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = int16(reg)
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = int16(reg)
+ p1.To.Offset = 0
+ p1.Scond = arm.C_SCOND_EQ
+ p.As = arm.ACMP
+ p.From.Type = obj.TYPE_CONST
+ p.From.Reg = 0
+ p.From.Offset = 0
+ p.Reg = int16(reg)
+ }
+}
diff --git a/src/cmd/new5g/gsubr.go b/src/cmd/new5g/gsubr.go
new file mode 100644
index 0000000000..c1ca679045
--- /dev/null
+++ b/src/cmd/new5g/gsubr.go
@@ -0,0 +1,1599 @@
+// Derived from Inferno utils/5c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+	"cmd/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm"
+	"fmt"
+)
+
+// TODO(rsc): Can make this bigger if we move
+// the text segment up higher in 5l for all GOOS.
+// At the same time, can raise StackBig in ../../runtime/stack.h.
+var unmappedzero int = 4096
+
+var resvd = []int{
+ 9, // reserved for m
+ 10, // reserved for g
+ arm.REGSP, // reserved for SP
+}
+
+func ginit() {
+ var i int
+
+ for i = 0; i < len(reg); i++ {
+ reg[i] = 0
+ }
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]]++
+ }
+}
+
+func gclean() {
+ var i int
+
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]]--
+ }
+
+ for i = 0; i < len(reg); i++ {
+ if reg[i] != 0 {
+ gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
+ }
+ }
+}
+
+func anyregalloc() int {
+ var i int
+ var j int
+
+ for i = 0; i < len(reg); i++ {
+ if reg[i] == 0 {
+ goto ok
+ }
+ for j = 0; j < len(resvd); j++ {
+ if resvd[j] == i {
+ goto ok
+ }
+ }
+ return 1
+ ok:
+ }
+
+ return 0
+}
+
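+// regpc tracks, for each register, the PC of the regalloc call that
+// took it; the "registers allocated at" dump below uses it to point
+// at leaking allocations.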
+var regpc [REGALLOC_FMAX + 1]uint32
+
+/*
+ * allocate register of type t, leave in n.
+ * if o != N, o is desired fixed register.
+ * caller must regfree(n).
+ */
+func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
+ var i int
+ var et int
+ var fixfree int
+ var floatfree int
+
+ if false && gc.Debug['r'] != 0 {
+ fixfree = 0
+ for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+ if reg[i] == 0 {
+ fixfree++
+ }
+ }
+ floatfree = 0
+ for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
+ if reg[i] == 0 {
+ floatfree++
+ }
+ }
+ fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree)
+ }
+
+ if t == nil {
+ gc.Fatal("regalloc: t nil")
+ }
+ et = int(gc.Simtype[t.Etype])
+ if gc.Is64(t) != 0 {
+		gc.Fatal("regalloc: 64 bit type %v", gc.Tconv(t, 0))
+ }
+
+ switch et {
+ case gc.TINT8,
+ gc.TUINT8,
+ gc.TINT16,
+ gc.TUINT16,
+ gc.TINT32,
+ gc.TUINT32,
+ gc.TPTR32,
+ gc.TBOOL:
+ if o != nil && o.Op == gc.OREGISTER {
+ i = int(o.Val.U.Reg)
+ if i >= REGALLOC_R0 && i <= REGALLOC_RMAX {
+ goto out
+ }
+ }
+
+ for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+ if reg[i] == 0 {
+ regpc[i] = uint32(obj.Getcallerpc(&n))
+ goto out
+ }
+ }
+
+ fmt.Printf("registers allocated at\n")
+ for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+			fmt.Printf("%d %#x\n", i, regpc[i])
+ }
+ gc.Fatal("out of fixed registers")
+ goto err
+
+ case gc.TFLOAT32,
+ gc.TFLOAT64:
+ if o != nil && o.Op == gc.OREGISTER {
+ i = int(o.Val.U.Reg)
+ if i >= REGALLOC_F0 && i <= REGALLOC_FMAX {
+ goto out
+ }
+ }
+
+ for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
+ if reg[i] == 0 {
+ goto out
+ }
+ }
+ gc.Fatal("out of floating point registers")
+ goto err
+
+ case gc.TCOMPLEX64,
+ gc.TCOMPLEX128:
+ gc.Tempname(n, t)
+ return
+ }
+
+ gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0))
+
+err:
+ gc.Nodreg(n, t, arm.REG_R0)
+ return
+
+out:
+ reg[i]++
+ gc.Nodreg(n, t, i)
+}
+
+func regfree(n *gc.Node) {
+ var i int
+ var fixfree int
+ var floatfree int
+
+ if false && gc.Debug['r'] != 0 {
+ fixfree = 0
+ for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+ if reg[i] == 0 {
+ fixfree++
+ }
+ }
+ floatfree = 0
+ for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
+ if reg[i] == 0 {
+ floatfree++
+ }
+ }
+ fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree)
+ }
+
+ if n.Op == gc.ONAME {
+ return
+ }
+ if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
+ gc.Fatal("regfree: not a register")
+ }
+ i = int(n.Val.U.Reg)
+ if i == arm.REGSP {
+ return
+ }
+ if i < 0 || i >= len(reg) || i >= len(regpc) {
+ gc.Fatal("regfree: reg out of range")
+ }
+ if reg[i] <= 0 {
+ gc.Fatal("regfree: reg %v not allocated", gc.Ctxt.Rconv(i))
+ }
+ reg[i]--
+ if reg[i] == 0 {
+ regpc[i] = 0
+ }
+}
+
+/*
+ * return constant i node.
+ * overwritten by next call, but useful in calls to gins.
+ */
+
+var ncon_n gc.Node
+
+func ncon(i uint32) *gc.Node {
+ if ncon_n.Type == nil {
+ gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
+ }
+ gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i))
+ return &ncon_n
+}
+
+var sclean [10]gc.Node
+
+var nsclean int
+
+/*
+ * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
+ */
+func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
+ var n1 gc.Node
+ var i int64
+
+ if !(gc.Is64(n.Type) != 0) {
+ gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
+ }
+
+ if nsclean >= len(sclean) {
+ gc.Fatal("split64 clean")
+ }
+ sclean[nsclean].Op = gc.OEMPTY
+ nsclean++
+ switch n.Op {
+ default:
+ switch n.Op {
+ default:
+ if !(dotaddable(n, &n1) != 0) {
+ igen(n, &n1, nil)
+ sclean[nsclean-1] = n1
+ }
+
+ n = &n1
+
+ case gc.ONAME:
+ if n.Class == gc.PPARAMREF {
+ cgen(n.Heapaddr, &n1)
+ sclean[nsclean-1] = n1
+ n = &n1
+ }
+
+		case gc.OINDREG: // nothing
+			break
+ }
+
+ *lo = *n
+ *hi = *n
+ lo.Type = gc.Types[gc.TUINT32]
+ if n.Type.Etype == gc.TINT64 {
+ hi.Type = gc.Types[gc.TINT32]
+ } else {
+ hi.Type = gc.Types[gc.TUINT32]
+ }
+ hi.Xoffset += 4
+
+ case gc.OLITERAL:
+ gc.Convconst(&n1, n.Type, &n.Val)
+ i = gc.Mpgetfix(n1.Val.U.Xval)
+ gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
+ i >>= 32
+ if n.Type.Etype == gc.TINT64 {
+ gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
+ } else {
+ gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
+ }
+ }
+}
+
+func splitclean() {
+ if nsclean <= 0 {
+ gc.Fatal("splitclean")
+ }
+ nsclean--
+ if sclean[nsclean].Op != gc.OEMPTY {
+ regfree(&sclean[nsclean])
+ }
+}
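+
+// A sketch of the expected pairing (n here is any 64-bit node):
+//	var lo, hi gc.Node
+//	split64(n, &lo, &hi)
+//	// ... operate on the 32-bit halves; hi sits 4 bytes above lo ...
+//	splitclean()
+// Every split64 must be matched by a splitclean, which releases any
+// register or temporary the split allocated.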
+
+func gmove(f *gc.Node, t *gc.Node) {
+ var a int
+ var ft int
+ var tt int
+ var fa int
+ var ta int
+ var cvt *gc.Type
+ var r1 gc.Node
+ var r2 gc.Node
+ var flo gc.Node
+ var fhi gc.Node
+ var tlo gc.Node
+ var thi gc.Node
+ var con gc.Node
+ var p1 *obj.Prog
+
+ if gc.Debug['M'] != 0 {
+ fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
+ }
+
+ ft = gc.Simsimtype(f.Type)
+ tt = gc.Simsimtype(t.Type)
+ cvt = t.Type
+
+ if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
+ gc.Complexmove(f, t)
+ return
+ }
+
+ // cannot have two memory operands;
+ // except 64-bit, which always copies via registers anyway.
+ if !(gc.Is64(f.Type) != 0) && !(gc.Is64(t.Type) != 0) && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ goto hard
+ }
+
+ // convert constant to desired type
+ if f.Op == gc.OLITERAL {
+ switch tt {
+ default:
+ gc.Convconst(&con, t.Type, &f.Val)
+
+ case gc.TINT16,
+ gc.TINT8:
+ gc.Convconst(&con, gc.Types[gc.TINT32], &f.Val)
+ regalloc(&r1, con.Type, t)
+ gins(arm.AMOVW, &con, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ case gc.TUINT16,
+ gc.TUINT8:
+ gc.Convconst(&con, gc.Types[gc.TUINT32], &f.Val)
+ regalloc(&r1, con.Type, t)
+ gins(arm.AMOVW, &con, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+ }
+
+ f = &con
+ ft = gc.Simsimtype(con.Type)
+
+ // constants can't move directly to memory
+ if gc.Ismem(t) != 0 && !(gc.Is64(t.Type) != 0) {
+ goto hard
+ }
+ }
+
+ // value -> value copy, only one memory operand.
+ // figure out the instruction to use.
+ // break out of switch for one-instruction gins.
+ // goto rdst for "destination must be register".
+ // goto hard for "convert to cvt type first".
+ // otherwise handle and return.
+
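+	// The key packs the source and destination simple types into one
+	// word, so e.g. a uint32->int8 truncation dispatches on
+	// gc.TUINT32<<16 | gc.TINT8.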
+ switch uint32(ft)<<16 | uint32(tt) {
+ default:
+ goto fatal
+
+ /*
+ * integer copy and truncate
+ */
+ case gc.TINT8<<16 | gc.TINT8: // same size
+ if !(gc.Ismem(f) != 0) {
+ a = arm.AMOVB
+ break
+ }
+ fallthrough
+
+ case gc.TUINT8<<16 | gc.TINT8,
+ gc.TINT16<<16 | gc.TINT8, // truncate
+ gc.TUINT16<<16 | gc.TINT8,
+ gc.TINT32<<16 | gc.TINT8,
+ gc.TUINT32<<16 | gc.TINT8:
+ a = arm.AMOVBS
+
+ case gc.TUINT8<<16 | gc.TUINT8:
+ if !(gc.Ismem(f) != 0) {
+ a = arm.AMOVB
+ break
+ }
+ fallthrough
+
+ case gc.TINT8<<16 | gc.TUINT8,
+ gc.TINT16<<16 | gc.TUINT8,
+ gc.TUINT16<<16 | gc.TUINT8,
+ gc.TINT32<<16 | gc.TUINT8,
+ gc.TUINT32<<16 | gc.TUINT8:
+ a = arm.AMOVBU
+
+ case gc.TINT64<<16 | gc.TINT8, // truncate low word
+ gc.TUINT64<<16 | gc.TINT8:
+ a = arm.AMOVBS
+
+ goto trunc64
+
+ case gc.TINT64<<16 | gc.TUINT8,
+ gc.TUINT64<<16 | gc.TUINT8:
+ a = arm.AMOVBU
+ goto trunc64
+
+ case gc.TINT16<<16 | gc.TINT16: // same size
+ if !(gc.Ismem(f) != 0) {
+ a = arm.AMOVH
+ break
+ }
+ fallthrough
+
+ case gc.TUINT16<<16 | gc.TINT16,
+ gc.TINT32<<16 | gc.TINT16, // truncate
+ gc.TUINT32<<16 | gc.TINT16:
+ a = arm.AMOVHS
+
+ case gc.TUINT16<<16 | gc.TUINT16:
+ if !(gc.Ismem(f) != 0) {
+ a = arm.AMOVH
+ break
+ }
+ fallthrough
+
+ case gc.TINT16<<16 | gc.TUINT16,
+ gc.TINT32<<16 | gc.TUINT16,
+ gc.TUINT32<<16 | gc.TUINT16:
+ a = arm.AMOVHU
+
+ case gc.TINT64<<16 | gc.TINT16, // truncate low word
+ gc.TUINT64<<16 | gc.TINT16:
+ a = arm.AMOVHS
+
+ goto trunc64
+
+ case gc.TINT64<<16 | gc.TUINT16,
+ gc.TUINT64<<16 | gc.TUINT16:
+ a = arm.AMOVHU
+ goto trunc64
+
+ case gc.TINT32<<16 | gc.TINT32, // same size
+ gc.TINT32<<16 | gc.TUINT32,
+ gc.TUINT32<<16 | gc.TINT32,
+ gc.TUINT32<<16 | gc.TUINT32:
+ a = arm.AMOVW
+
+ case gc.TINT64<<16 | gc.TINT32, // truncate
+ gc.TUINT64<<16 | gc.TINT32,
+ gc.TINT64<<16 | gc.TUINT32,
+ gc.TUINT64<<16 | gc.TUINT32:
+ split64(f, &flo, &fhi)
+
+ regalloc(&r1, t.Type, nil)
+ gins(arm.AMOVW, &flo, &r1)
+ gins(arm.AMOVW, &r1, t)
+ regfree(&r1)
+ splitclean()
+ return
+
+ case gc.TINT64<<16 | gc.TINT64, // same size
+ gc.TINT64<<16 | gc.TUINT64,
+ gc.TUINT64<<16 | gc.TINT64,
+ gc.TUINT64<<16 | gc.TUINT64:
+ split64(f, &flo, &fhi)
+
+ split64(t, &tlo, &thi)
+ regalloc(&r1, flo.Type, nil)
+ regalloc(&r2, fhi.Type, nil)
+ gins(arm.AMOVW, &flo, &r1)
+ gins(arm.AMOVW, &fhi, &r2)
+ gins(arm.AMOVW, &r1, &tlo)
+ gins(arm.AMOVW, &r2, &thi)
+ regfree(&r1)
+ regfree(&r2)
+ splitclean()
+ splitclean()
+ return
+
+ /*
+ * integer up-conversions
+ */
+ case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+ gc.TINT8<<16 | gc.TUINT16,
+ gc.TINT8<<16 | gc.TINT32,
+ gc.TINT8<<16 | gc.TUINT32:
+ a = arm.AMOVBS
+
+ goto rdst
+
+ case gc.TINT8<<16 | gc.TINT64, // convert via int32
+ gc.TINT8<<16 | gc.TUINT64:
+ cvt = gc.Types[gc.TINT32]
+
+ goto hard
+
+ case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+ gc.TUINT8<<16 | gc.TUINT16,
+ gc.TUINT8<<16 | gc.TINT32,
+ gc.TUINT8<<16 | gc.TUINT32:
+ a = arm.AMOVBU
+
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
+ gc.TUINT8<<16 | gc.TUINT64:
+ cvt = gc.Types[gc.TUINT32]
+
+ goto hard
+
+ case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+ gc.TINT16<<16 | gc.TUINT32:
+ a = arm.AMOVHS
+
+ goto rdst
+
+ case gc.TINT16<<16 | gc.TINT64, // convert via int32
+ gc.TINT16<<16 | gc.TUINT64:
+ cvt = gc.Types[gc.TINT32]
+
+ goto hard
+
+ case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+ gc.TUINT16<<16 | gc.TUINT32:
+ a = arm.AMOVHU
+
+ goto rdst
+
+ case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
+ gc.TUINT16<<16 | gc.TUINT64:
+ cvt = gc.Types[gc.TUINT32]
+
+ goto hard
+
+ case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+ gc.TINT32<<16 | gc.TUINT64:
+ split64(t, &tlo, &thi)
+
+ regalloc(&r1, tlo.Type, nil)
+ regalloc(&r2, thi.Type, nil)
+ gmove(f, &r1)
+ p1 = gins(arm.AMOVW, &r1, &r2)
+ p1.From.Type = obj.TYPE_SHIFT
+ p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Val.U.Reg)&15 // r1->31
+ p1.From.Reg = 0
+
+ //print("gmove: %P\n", p1);
+ gins(arm.AMOVW, &r1, &tlo)
+
+ gins(arm.AMOVW, &r2, &thi)
+ regfree(&r1)
+ regfree(&r2)
+ splitclean()
+ return
+
+ case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+ gc.TUINT32<<16 | gc.TUINT64:
+ split64(t, &tlo, &thi)
+
+ gmove(f, &tlo)
+ regalloc(&r1, thi.Type, nil)
+ gins(arm.AMOVW, ncon(0), &r1)
+ gins(arm.AMOVW, &r1, &thi)
+ regfree(&r1)
+ splitclean()
+ return
+
+ // case CASE(TFLOAT64, TUINT64):
+ /*
+ * float to integer
+ */
+ case gc.TFLOAT32<<16 | gc.TINT8,
+ gc.TFLOAT32<<16 | gc.TUINT8,
+ gc.TFLOAT32<<16 | gc.TINT16,
+ gc.TFLOAT32<<16 | gc.TUINT16,
+ gc.TFLOAT32<<16 | gc.TINT32,
+ gc.TFLOAT32<<16 | gc.TUINT32,
+
+ // case CASE(TFLOAT32, TUINT64):
+
+ gc.TFLOAT64<<16 | gc.TINT8,
+ gc.TFLOAT64<<16 | gc.TUINT8,
+ gc.TFLOAT64<<16 | gc.TINT16,
+ gc.TFLOAT64<<16 | gc.TUINT16,
+ gc.TFLOAT64<<16 | gc.TINT32,
+ gc.TFLOAT64<<16 | gc.TUINT32:
+ fa = arm.AMOVF
+
+ a = arm.AMOVFW
+ if ft == gc.TFLOAT64 {
+ fa = arm.AMOVD
+ a = arm.AMOVDW
+ }
+
+ ta = arm.AMOVW
+ switch tt {
+ case gc.TINT8:
+ ta = arm.AMOVBS
+
+ case gc.TUINT8:
+ ta = arm.AMOVBU
+
+ case gc.TINT16:
+ ta = arm.AMOVHS
+
+ case gc.TUINT16:
+ ta = arm.AMOVHU
+ }
+
+ regalloc(&r1, gc.Types[ft], f)
+ regalloc(&r2, gc.Types[tt], t)
+ gins(fa, f, &r1) // load to fpu
+ p1 = gins(a, &r1, &r1) // convert to w
+ switch tt {
+ case gc.TUINT8,
+ gc.TUINT16,
+ gc.TUINT32:
+ p1.Scond |= arm.C_UBIT
+ }
+
+ gins(arm.AMOVW, &r1, &r2) // copy to cpu
+ gins(ta, &r2, t) // store
+ regfree(&r1)
+ regfree(&r2)
+ return
+
+ /*
+ * integer to float
+ */
+ case gc.TINT8<<16 | gc.TFLOAT32,
+ gc.TUINT8<<16 | gc.TFLOAT32,
+ gc.TINT16<<16 | gc.TFLOAT32,
+ gc.TUINT16<<16 | gc.TFLOAT32,
+ gc.TINT32<<16 | gc.TFLOAT32,
+ gc.TUINT32<<16 | gc.TFLOAT32,
+ gc.TINT8<<16 | gc.TFLOAT64,
+ gc.TUINT8<<16 | gc.TFLOAT64,
+ gc.TINT16<<16 | gc.TFLOAT64,
+ gc.TUINT16<<16 | gc.TFLOAT64,
+ gc.TINT32<<16 | gc.TFLOAT64,
+ gc.TUINT32<<16 | gc.TFLOAT64:
+ fa = arm.AMOVW
+
+ switch ft {
+ case gc.TINT8:
+ fa = arm.AMOVBS
+
+ case gc.TUINT8:
+ fa = arm.AMOVBU
+
+ case gc.TINT16:
+ fa = arm.AMOVHS
+
+ case gc.TUINT16:
+ fa = arm.AMOVHU
+ }
+
+ a = arm.AMOVWF
+ ta = arm.AMOVF
+ if tt == gc.TFLOAT64 {
+ a = arm.AMOVWD
+ ta = arm.AMOVD
+ }
+
+ regalloc(&r1, gc.Types[ft], f)
+ regalloc(&r2, gc.Types[tt], t)
+ gins(fa, f, &r1) // load to cpu
+ gins(arm.AMOVW, &r1, &r2) // copy to fpu
+ p1 = gins(a, &r2, &r2) // convert
+ switch ft {
+ case gc.TUINT8,
+ gc.TUINT16,
+ gc.TUINT32:
+ p1.Scond |= arm.C_UBIT
+ }
+
+ gins(ta, &r2, t) // store
+ regfree(&r1)
+ regfree(&r2)
+ return
+
+ case gc.TUINT64<<16 | gc.TFLOAT32,
+ gc.TUINT64<<16 | gc.TFLOAT64:
+ gc.Fatal("gmove UINT64, TFLOAT not implemented")
+ return
+
+ /*
+ * float to float
+ */
+ case gc.TFLOAT32<<16 | gc.TFLOAT32:
+ a = arm.AMOVF
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT64:
+ a = arm.AMOVD
+
+ case gc.TFLOAT32<<16 | gc.TFLOAT64:
+ regalloc(&r1, gc.Types[gc.TFLOAT64], t)
+ gins(arm.AMOVF, f, &r1)
+ gins(arm.AMOVFD, &r1, &r1)
+ gins(arm.AMOVD, &r1, t)
+ regfree(&r1)
+ return
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT32:
+ regalloc(&r1, gc.Types[gc.TFLOAT64], t)
+ gins(arm.AMOVD, f, &r1)
+ gins(arm.AMOVDF, &r1, &r1)
+ gins(arm.AMOVF, &r1, t)
+ regfree(&r1)
+ return
+ }
+
+ gins(a, f, t)
+ return
+
+ // TODO(kaib): we almost always require a register dest anyway, this can probably be
+ // removed.
+ // requires register destination
+rdst:
+ regalloc(&r1, t.Type, t)
+
+ gins(a, f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ // requires register intermediate
+hard:
+ regalloc(&r1, cvt, t)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ // truncate 64 bit integer
+trunc64:
+ split64(f, &flo, &fhi)
+
+ regalloc(&r1, t.Type, nil)
+ gins(a, &flo, &r1)
+ gins(a, &r1, t)
+ regfree(&r1)
+ splitclean()
+ return
+
+ // should not happen
+fatal:
+ gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
+}
+
+func samaddr(f *gc.Node, t *gc.Node) int {
+ if f.Op != t.Op {
+ return 0
+ }
+
+ switch f.Op {
+ case gc.OREGISTER:
+ if f.Val.U.Reg != t.Val.U.Reg {
+ break
+ }
+ return 1
+ }
+
+ return 0
+}
+
+/*
+ * generate one instruction:
+ * as f, t
+ */
+func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+ var p *obj.Prog
+	var af obj.Addr
+	var at obj.Addr
+
+	// Node nod;
+	// int32 v;
+
+ if f != nil && f.Op == gc.OINDEX {
+ gc.Fatal("gins OINDEX not implemented")
+ }
+
+ // regalloc(&nod, &regnode, Z);
+ // v = constnode.vconst;
+ // cgen(f->right, &nod);
+ // constnode.vconst = v;
+ // idx.reg = nod.reg;
+ // regfree(&nod);
+ if t != nil && t.Op == gc.OINDEX {
+ gc.Fatal("gins OINDEX not implemented")
+ }
+
+ // regalloc(&nod, &regnode, Z);
+ // v = constnode.vconst;
+ // cgen(t->right, &nod);
+ // constnode.vconst = v;
+ // idx.reg = nod.reg;
+ // regfree(&nod);
+ af = obj.Addr{}
+
+ at = obj.Addr{}
+ if f != nil {
+ gc.Naddr(f, &af, 1)
+ }
+ if t != nil {
+ gc.Naddr(t, &at, 1)
+ }
+ p = gc.Prog(as)
+ if f != nil {
+ p.From = af
+ }
+ if t != nil {
+ p.To = at
+ }
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v\n", p)
+ }
+ return p
+}
+
+/*
+ * insert n into reg slot of p
+ */
+func raddr(n *gc.Node, p *obj.Prog) {
+ var a obj.Addr
+
+ gc.Naddr(n, &a, 1)
+ if a.Type != obj.TYPE_REG {
+ if n != nil {
+ gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
+ } else {
+ gc.Fatal("bad in raddr: <null>")
+ }
+ p.Reg = 0
+ } else {
+ p.Reg = a.Reg
+ }
+}
+
+/*
+ * generate a comparison.
+ * TODO(kaib): one of the args can actually be a small constant;
+ * relax the constraint and fix call sites.
+ */
+func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
+ var p *obj.Prog
+
+ if lhs.Op != gc.OREGISTER {
+ gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
+ }
+
+ p = gins(as, rhs, nil)
+ raddr(lhs, p)
+ return p
+}
+
+/*
+ * generate a constant shift
+ * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
+ */
+func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
+ var p *obj.Prog
+
+ if sval <= 0 || sval > 32 {
+ gc.Fatal("bad shift value: %d", sval)
+ }
+
+ sval = sval & 0x1f
+
+ p = gins(as, nil, rhs)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Val.U.Reg)&15
+ return p
+}
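+
+// For example, the element-size scaling in sudoaddable below uses
+//	gshift(arm.AADD, reg1, arm.SHIFT_LL, 2, reg)
+// to emit ADD reg1<<2, reg for 4-byte elements.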
+
+/*
+ * generate a register shift
+ */
+func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
+ var p *obj.Prog
+ p = gins(as, nil, rhs)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(stype) | (int64(reg.Val.U.Reg)&15)<<8 | 1<<4 | int64(lhs.Val.U.Reg)&15
+ return p
+}
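+
+// The 1<<4 bit in the offset marks a register-specified shift in the
+// ARM data-processing operand encoding, distinguishing it from the
+// immediate-count form emitted by gshift above.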
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
+func optoas(op int, t *gc.Type) int {
+ var a int
+
+ if t == nil {
+ gc.Fatal("optoas: t is nil")
+ }
+
+ a = obj.AXXX
+ switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+ default:
+ gc.Fatal("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0), gc.Tconv(gc.Types[t.Etype], 0), gc.Tconv(gc.Types[gc.Simtype[t.Etype]], 0))
+
+ /* case CASE(OADDR, TPTR32):
+ a = ALEAL;
+ break;
+
+ case CASE(OADDR, TPTR64):
+ a = ALEAQ;
+ break;
+ */
+ // TODO(kaib): make sure the conditional branches work on all edge cases
+ case gc.OEQ<<16 | gc.TBOOL,
+ gc.OEQ<<16 | gc.TINT8,
+ gc.OEQ<<16 | gc.TUINT8,
+ gc.OEQ<<16 | gc.TINT16,
+ gc.OEQ<<16 | gc.TUINT16,
+ gc.OEQ<<16 | gc.TINT32,
+ gc.OEQ<<16 | gc.TUINT32,
+ gc.OEQ<<16 | gc.TINT64,
+ gc.OEQ<<16 | gc.TUINT64,
+ gc.OEQ<<16 | gc.TPTR32,
+ gc.OEQ<<16 | gc.TPTR64,
+ gc.OEQ<<16 | gc.TFLOAT32,
+ gc.OEQ<<16 | gc.TFLOAT64:
+ a = arm.ABEQ
+
+ case gc.ONE<<16 | gc.TBOOL,
+ gc.ONE<<16 | gc.TINT8,
+ gc.ONE<<16 | gc.TUINT8,
+ gc.ONE<<16 | gc.TINT16,
+ gc.ONE<<16 | gc.TUINT16,
+ gc.ONE<<16 | gc.TINT32,
+ gc.ONE<<16 | gc.TUINT32,
+ gc.ONE<<16 | gc.TINT64,
+ gc.ONE<<16 | gc.TUINT64,
+ gc.ONE<<16 | gc.TPTR32,
+ gc.ONE<<16 | gc.TPTR64,
+ gc.ONE<<16 | gc.TFLOAT32,
+ gc.ONE<<16 | gc.TFLOAT64:
+ a = arm.ABNE
+
+ case gc.OLT<<16 | gc.TINT8,
+ gc.OLT<<16 | gc.TINT16,
+ gc.OLT<<16 | gc.TINT32,
+ gc.OLT<<16 | gc.TINT64,
+ gc.OLT<<16 | gc.TFLOAT32,
+ gc.OLT<<16 | gc.TFLOAT64:
+ a = arm.ABLT
+
+ case gc.OLT<<16 | gc.TUINT8,
+ gc.OLT<<16 | gc.TUINT16,
+ gc.OLT<<16 | gc.TUINT32,
+ gc.OLT<<16 | gc.TUINT64:
+ a = arm.ABLO
+
+ case gc.OLE<<16 | gc.TINT8,
+ gc.OLE<<16 | gc.TINT16,
+ gc.OLE<<16 | gc.TINT32,
+ gc.OLE<<16 | gc.TINT64,
+ gc.OLE<<16 | gc.TFLOAT32,
+ gc.OLE<<16 | gc.TFLOAT64:
+ a = arm.ABLE
+
+ case gc.OLE<<16 | gc.TUINT8,
+ gc.OLE<<16 | gc.TUINT16,
+ gc.OLE<<16 | gc.TUINT32,
+ gc.OLE<<16 | gc.TUINT64:
+ a = arm.ABLS
+
+ case gc.OGT<<16 | gc.TINT8,
+ gc.OGT<<16 | gc.TINT16,
+ gc.OGT<<16 | gc.TINT32,
+ gc.OGT<<16 | gc.TINT64,
+ gc.OGT<<16 | gc.TFLOAT32,
+ gc.OGT<<16 | gc.TFLOAT64:
+ a = arm.ABGT
+
+ case gc.OGT<<16 | gc.TUINT8,
+ gc.OGT<<16 | gc.TUINT16,
+ gc.OGT<<16 | gc.TUINT32,
+ gc.OGT<<16 | gc.TUINT64:
+ a = arm.ABHI
+
+ case gc.OGE<<16 | gc.TINT8,
+ gc.OGE<<16 | gc.TINT16,
+ gc.OGE<<16 | gc.TINT32,
+ gc.OGE<<16 | gc.TINT64,
+ gc.OGE<<16 | gc.TFLOAT32,
+ gc.OGE<<16 | gc.TFLOAT64:
+ a = arm.ABGE
+
+ case gc.OGE<<16 | gc.TUINT8,
+ gc.OGE<<16 | gc.TUINT16,
+ gc.OGE<<16 | gc.TUINT32,
+ gc.OGE<<16 | gc.TUINT64:
+ a = arm.ABHS
+
+ case gc.OCMP<<16 | gc.TBOOL,
+ gc.OCMP<<16 | gc.TINT8,
+ gc.OCMP<<16 | gc.TUINT8,
+ gc.OCMP<<16 | gc.TINT16,
+ gc.OCMP<<16 | gc.TUINT16,
+ gc.OCMP<<16 | gc.TINT32,
+ gc.OCMP<<16 | gc.TUINT32,
+ gc.OCMP<<16 | gc.TPTR32:
+ a = arm.ACMP
+
+ case gc.OCMP<<16 | gc.TFLOAT32:
+ a = arm.ACMPF
+
+ case gc.OCMP<<16 | gc.TFLOAT64:
+ a = arm.ACMPD
+
+ case gc.OAS<<16 | gc.TBOOL:
+ a = arm.AMOVB
+
+ case gc.OAS<<16 | gc.TINT8:
+ a = arm.AMOVBS
+
+ case gc.OAS<<16 | gc.TUINT8:
+ a = arm.AMOVBU
+
+ case gc.OAS<<16 | gc.TINT16:
+ a = arm.AMOVHS
+
+ case gc.OAS<<16 | gc.TUINT16:
+ a = arm.AMOVHU
+
+ case gc.OAS<<16 | gc.TINT32,
+ gc.OAS<<16 | gc.TUINT32,
+ gc.OAS<<16 | gc.TPTR32:
+ a = arm.AMOVW
+
+ case gc.OAS<<16 | gc.TFLOAT32:
+ a = arm.AMOVF
+
+ case gc.OAS<<16 | gc.TFLOAT64:
+ a = arm.AMOVD
+
+ case gc.OADD<<16 | gc.TINT8,
+ gc.OADD<<16 | gc.TUINT8,
+ gc.OADD<<16 | gc.TINT16,
+ gc.OADD<<16 | gc.TUINT16,
+ gc.OADD<<16 | gc.TINT32,
+ gc.OADD<<16 | gc.TUINT32,
+ gc.OADD<<16 | gc.TPTR32:
+ a = arm.AADD
+
+ case gc.OADD<<16 | gc.TFLOAT32:
+ a = arm.AADDF
+
+ case gc.OADD<<16 | gc.TFLOAT64:
+ a = arm.AADDD
+
+ case gc.OSUB<<16 | gc.TINT8,
+ gc.OSUB<<16 | gc.TUINT8,
+ gc.OSUB<<16 | gc.TINT16,
+ gc.OSUB<<16 | gc.TUINT16,
+ gc.OSUB<<16 | gc.TINT32,
+ gc.OSUB<<16 | gc.TUINT32,
+ gc.OSUB<<16 | gc.TPTR32:
+ a = arm.ASUB
+
+ case gc.OSUB<<16 | gc.TFLOAT32:
+ a = arm.ASUBF
+
+ case gc.OSUB<<16 | gc.TFLOAT64:
+ a = arm.ASUBD
+
+ case gc.OMINUS<<16 | gc.TINT8,
+ gc.OMINUS<<16 | gc.TUINT8,
+ gc.OMINUS<<16 | gc.TINT16,
+ gc.OMINUS<<16 | gc.TUINT16,
+ gc.OMINUS<<16 | gc.TINT32,
+ gc.OMINUS<<16 | gc.TUINT32,
+ gc.OMINUS<<16 | gc.TPTR32:
+ a = arm.ARSB
+
+ case gc.OAND<<16 | gc.TINT8,
+ gc.OAND<<16 | gc.TUINT8,
+ gc.OAND<<16 | gc.TINT16,
+ gc.OAND<<16 | gc.TUINT16,
+ gc.OAND<<16 | gc.TINT32,
+ gc.OAND<<16 | gc.TUINT32,
+ gc.OAND<<16 | gc.TPTR32:
+ a = arm.AAND
+
+ case gc.OOR<<16 | gc.TINT8,
+ gc.OOR<<16 | gc.TUINT8,
+ gc.OOR<<16 | gc.TINT16,
+ gc.OOR<<16 | gc.TUINT16,
+ gc.OOR<<16 | gc.TINT32,
+ gc.OOR<<16 | gc.TUINT32,
+ gc.OOR<<16 | gc.TPTR32:
+ a = arm.AORR
+
+ case gc.OXOR<<16 | gc.TINT8,
+ gc.OXOR<<16 | gc.TUINT8,
+ gc.OXOR<<16 | gc.TINT16,
+ gc.OXOR<<16 | gc.TUINT16,
+ gc.OXOR<<16 | gc.TINT32,
+ gc.OXOR<<16 | gc.TUINT32,
+ gc.OXOR<<16 | gc.TPTR32:
+ a = arm.AEOR
+
+ case gc.OLSH<<16 | gc.TINT8,
+ gc.OLSH<<16 | gc.TUINT8,
+ gc.OLSH<<16 | gc.TINT16,
+ gc.OLSH<<16 | gc.TUINT16,
+ gc.OLSH<<16 | gc.TINT32,
+ gc.OLSH<<16 | gc.TUINT32,
+ gc.OLSH<<16 | gc.TPTR32:
+ a = arm.ASLL
+
+ case gc.ORSH<<16 | gc.TUINT8,
+ gc.ORSH<<16 | gc.TUINT16,
+ gc.ORSH<<16 | gc.TUINT32,
+ gc.ORSH<<16 | gc.TPTR32:
+ a = arm.ASRL
+
+ case gc.ORSH<<16 | gc.TINT8,
+ gc.ORSH<<16 | gc.TINT16,
+ gc.ORSH<<16 | gc.TINT32:
+ a = arm.ASRA
+
+ case gc.OMUL<<16 | gc.TUINT8,
+ gc.OMUL<<16 | gc.TUINT16,
+ gc.OMUL<<16 | gc.TUINT32,
+ gc.OMUL<<16 | gc.TPTR32:
+ a = arm.AMULU
+
+ case gc.OMUL<<16 | gc.TINT8,
+ gc.OMUL<<16 | gc.TINT16,
+ gc.OMUL<<16 | gc.TINT32:
+ a = arm.AMUL
+
+ case gc.OMUL<<16 | gc.TFLOAT32:
+ a = arm.AMULF
+
+ case gc.OMUL<<16 | gc.TFLOAT64:
+ a = arm.AMULD
+
+ case gc.ODIV<<16 | gc.TUINT8,
+ gc.ODIV<<16 | gc.TUINT16,
+ gc.ODIV<<16 | gc.TUINT32,
+ gc.ODIV<<16 | gc.TPTR32:
+ a = arm.ADIVU
+
+ case gc.ODIV<<16 | gc.TINT8,
+ gc.ODIV<<16 | gc.TINT16,
+ gc.ODIV<<16 | gc.TINT32:
+ a = arm.ADIV
+
+ case gc.OMOD<<16 | gc.TUINT8,
+ gc.OMOD<<16 | gc.TUINT16,
+ gc.OMOD<<16 | gc.TUINT32,
+ gc.OMOD<<16 | gc.TPTR32:
+ a = arm.AMODU
+
+ case gc.OMOD<<16 | gc.TINT8,
+ gc.OMOD<<16 | gc.TINT16,
+ gc.OMOD<<16 | gc.TINT32:
+ a = arm.AMOD
+
+ // case CASE(OEXTEND, TINT16):
+ // a = ACWD;
+ // break;
+
+ // case CASE(OEXTEND, TINT32):
+ // a = ACDQ;
+ // break;
+
+ // case CASE(OEXTEND, TINT64):
+ // a = ACQO;
+ // break;
+
+ case gc.ODIV<<16 | gc.TFLOAT32:
+ a = arm.ADIVF
+
+ case gc.ODIV<<16 | gc.TFLOAT64:
+ a = arm.ADIVD
+ }
+
+ return a
+}
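+
+// For example, optoas(gc.OADD, gc.Types[gc.TINT32]) yields arm.AADD,
+// while optoas(gc.OCMP, gc.Types[gc.TUINT32]) yields arm.ACMP, as in
+// the bounds checks inside sudoaddable below.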
+
+const (
+ ODynam = 1 << 0
+ OPtrto = 1 << 1
+)
+
+var clean [20]gc.Node
+
+var cleani int = 0
+
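+// sudoclean releases the pair of scratch nodes pushed onto the clean
+// stack by the most recent successful sudoaddable, freeing whichever
+// of them holds a register.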
+func sudoclean() {
+ if clean[cleani-1].Op != gc.OEMPTY {
+ regfree(&clean[cleani-1])
+ }
+ if clean[cleani-2].Op != gc.OEMPTY {
+ regfree(&clean[cleani-2])
+ }
+ cleani -= 2
+}
+
+func dotaddable(n *gc.Node, n1 *gc.Node) int {
+ var o int
+ var oary [10]int64
+ var nn *gc.Node
+
+ if n.Op != gc.ODOT {
+ return 0
+ }
+
+ o = gc.Dotoffset(n, oary[:], &nn)
+ if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+ *n1 = *nn
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ return 1
+ }
+
+ return 0
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return 0 on failure, 1 on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) int {
+ var o int
+ var i int
+ var oary [10]int64
+ var v int64
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var n4 gc.Node
+ var nn *gc.Node
+ var l *gc.Node
+ var r *gc.Node
+ var reg *gc.Node
+ var reg1 *gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var t *gc.Type
+
+ if n.Type == nil {
+ return 0
+ }
+
+ *a = obj.Addr{}
+
+ switch n.Op {
+ case gc.OLITERAL:
+ if !(gc.Isconst(n, gc.CTINT) != 0) {
+ break
+ }
+ v = gc.Mpgetfix(n.Val.U.Xval)
+ if v >= 32000 || v <= -32000 {
+ break
+ }
+ goto lit
+
+ case gc.ODOT,
+ gc.ODOTPTR:
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ goto odot
+
+ case gc.OINDEX:
+ return 0
+
+ // disabled: OINDEX case is now covered by agenr
+ // for a more suitable register allocation pattern.
+ if n.Left.Type.Etype == gc.TSTRING {
+ return 0
+ }
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ goto oindex
+ }
+
+ return 0
+
+lit:
+ switch as {
+ default:
+ return 0
+
+ case arm.AADD,
+ arm.ASUB,
+ arm.AAND,
+ arm.AORR,
+ arm.AEOR,
+ arm.AMOVB,
+ arm.AMOVBS,
+ arm.AMOVBU,
+ arm.AMOVH,
+ arm.AMOVHS,
+ arm.AMOVHU,
+ arm.AMOVW:
+ break
+ }
+
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ gc.Naddr(n, a, 1)
+ goto yes
+
+odot:
+ o = gc.Dotoffset(n, oary[:], &nn)
+ if nn == nil {
+ goto no
+ }
+
+ if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+ // directly addressable set of DOTs
+ n1 = *nn
+
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ gc.Naddr(&n1, a, 1)
+ goto yes
+ }
+
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ if oary[0] >= 0 {
+ agen(nn, reg)
+ n1.Xoffset = oary[0]
+ } else {
+ cgen(nn, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[0] + 1)
+ }
+
+ for i = 1; i < o; i++ {
+ if oary[i] >= 0 {
+ gc.Fatal("can't happen")
+ }
+ gins(arm.AMOVW, &n1, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[i] + 1)
+ }
+
+ a.Type = obj.TYPE_NONE
+ a.Name = obj.NAME_NONE
+ n1.Type = n.Type
+ gc.Naddr(&n1, a, 1)
+ goto yes
+
+oindex:
+ l = n.Left
+ r = n.Right
+ if l.Ullman >= gc.UINF && r.Ullman >= gc.UINF {
+ goto no
+ }
+
+ // set o to type of array
+ o = 0
+
+ if gc.Isptr[l.Type.Etype] != 0 {
+ o += OPtrto
+ if l.Type.Type.Etype != gc.TARRAY {
+ gc.Fatal("not ptr ary")
+ }
+ if l.Type.Type.Bound < 0 {
+ o += ODynam
+ }
+ } else {
+ if l.Type.Etype != gc.TARRAY {
+ gc.Fatal("not ary")
+ }
+ if l.Type.Bound < 0 {
+ o += ODynam
+ }
+ }
+
+ *w = int(n.Type.Width)
+ if gc.Isconst(r, gc.CTINT) != 0 {
+ goto oindex_const
+ }
+
+ switch *w {
+ default:
+ goto no
+
+ case 1,
+ 2,
+ 4,
+ 8:
+ break
+ }
+
+ // load the array (reg)
+ if l.Ullman > r.Ullman {
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ if o&OPtrto != 0 {
+ cgen(l, reg)
+ gc.Cgen_checknil(reg)
+ } else {
+ agen(l, reg)
+ }
+ }
+
+ // load the index (reg1)
+ t = gc.Types[gc.TUINT32]
+
+ if gc.Issigned[r.Type.Etype] != 0 {
+ t = gc.Types[gc.TINT32]
+ }
+ regalloc(reg1, t, nil)
+ regalloc(&n3, gc.Types[gc.TINT32], reg1)
+ p2 = cgenindex(r, &n3, bool2int(gc.Debug['B'] != 0 || n.Bounded != 0))
+ gmove(&n3, reg1)
+ regfree(&n3)
+
+ // load the array (reg)
+ if l.Ullman <= r.Ullman {
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ if o&OPtrto != 0 {
+ cgen(l, reg)
+ gc.Cgen_checknil(reg)
+ } else {
+ agen(l, reg)
+ }
+ }
+
+ // check bounds
+ if !(gc.Debug['B'] != 0) {
+ if o&ODynam != 0 {
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.Tptr]
+ n2.Xoffset = int64(gc.Array_nel)
+ } else {
+ if o&OPtrto != 0 {
+ gc.Nodconst(&n2, gc.Types[gc.TUINT32], l.Type.Type.Bound)
+ } else {
+ gc.Nodconst(&n2, gc.Types[gc.TUINT32], l.Type.Bound)
+ }
+ }
+
+ regalloc(&n3, n2.Type, nil)
+ cgen(&n2, &n3)
+ gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), reg1, &n3)
+ regfree(&n3)
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ if p2 != nil {
+ gc.Patch(p2, gc.Pc)
+ }
+ ginscall(gc.Panicindex, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if o&ODynam != 0 {
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.Tptr]
+ n2.Xoffset = int64(gc.Array_array)
+ gmove(&n2, reg)
+ }
+
+ switch *w {
+ case 1:
+ gins(arm.AADD, reg1, reg)
+
+ case 2:
+ gshift(arm.AADD, reg1, arm.SHIFT_LL, 1, reg)
+
+ case 4:
+ gshift(arm.AADD, reg1, arm.SHIFT_LL, 2, reg)
+
+ case 8:
+ gshift(arm.AADD, reg1, arm.SHIFT_LL, 3, reg)
+ }
+
+ gc.Naddr(reg1, a, 1)
+ a.Type = obj.TYPE_MEM
+ a.Reg = reg.Val.U.Reg
+ a.Offset = 0
+ goto yes
+
+ // index is constant
+ // can check statically and
+ // can multiply by width statically
+
+oindex_const:
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+
+ if o&OPtrto != 0 {
+ cgen(l, reg)
+ gc.Cgen_checknil(reg)
+ } else {
+ agen(l, reg)
+ }
+
+ v = gc.Mpgetfix(r.Val.U.Xval)
+ if o&ODynam != 0 {
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_nel)
+ gc.Nodconst(&n2, gc.Types[gc.TUINT32], v)
+ regalloc(&n3, gc.Types[gc.TUINT32], nil)
+ cgen(&n2, &n3)
+ regalloc(&n4, n1.Type, nil)
+ cgen(&n1, &n4)
+ gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n4, &n3)
+ regfree(&n4)
+ regfree(&n3)
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
+ ginscall(gc.Panicindex, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_array)
+ gmove(&n1, reg)
+ }
+
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Xoffset = v * int64(*w)
+ a.Type = obj.TYPE_NONE
+ a.Name = obj.NAME_NONE
+ gc.Naddr(&n2, a, 1)
+ goto yes
+
+yes:
+ return 1
+
+no:
+ sudoclean()
+ return 0
+}
diff --git a/src/cmd/new5g/peep.go b/src/cmd/new5g/peep.go
new file mode 100644
index 0000000000..7534aae6bd
--- /dev/null
+++ b/src/cmd/new5g/peep.go
@@ -0,0 +1,1868 @@
+// Inferno utils/5c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+	"cmd/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm"
+	"fmt"
+)
+
+var gactive uint32
+
+// UNUSED
+func peep(firstp *obj.Prog) {
+ var r *gc.Flow
+ var g *gc.Graph
+ var p *obj.Prog
+ var t int
+
+ g = gc.Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+ gactive = 0
+
+loop1:
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("loop1", g.Start, 0)
+ }
+
+ t = 0
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ /*
+ * elide shift into TYPE_SHIFT operand of subsequent instruction
+ */
+ // if(shiftprop(r)) {
+ // excise(r);
+ // t++;
+ // break;
+ // }
+ case arm.ASLL,
+ arm.ASRL,
+ arm.ASRA:
+ break
+
+ case arm.AMOVB,
+ arm.AMOVH,
+ arm.AMOVW,
+ arm.AMOVF,
+ arm.AMOVD:
+ if regtyp(&p.From) != 0 {
+ if p.From.Type == p.To.Type && isfloatreg(&p.From) == isfloatreg(&p.To) {
+ if p.Scond == arm.C_SCOND_NONE {
+ if copyprop(g, r) != 0 {
+ excise(r)
+ t++
+ break
+ }
+
+ if subprop(r) != 0 && copyprop(g, r) != 0 {
+ excise(r)
+ t++
+ break
+ }
+ }
+ }
+ }
+
+ case arm.AMOVHS,
+ arm.AMOVHU,
+ arm.AMOVBS,
+ arm.AMOVBU:
+ if p.From.Type == obj.TYPE_REG {
+ if shortprop(r) != 0 {
+ t++
+ }
+ }
+ }
+ }
+
+ /*
+ if(p->scond == C_SCOND_NONE)
+ if(regtyp(&p->to))
+ if(isdconst(&p->from)) {
+ constprop(&p->from, &p->to, r->s1);
+ }
+ break;
+ */
+ if t != 0 {
+ goto loop1
+ }
+
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ /*
+ * EOR -1,x,y => MVN x,y
+ */
+ case arm.AEOR:
+ if isdconst(&p.From) != 0 && p.From.Offset == -1 {
+ p.As = arm.AMVN
+ p.From.Type = obj.TYPE_REG
+ if p.Reg != 0 {
+ p.From.Reg = p.Reg
+ } else {
+ p.From.Reg = p.To.Reg
+ }
+ p.Reg = 0
+ }
+ }
+ }
+
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ case arm.AMOVW,
+ arm.AMOVB,
+ arm.AMOVBS,
+ arm.AMOVBU:
+ if p.From.Type == obj.TYPE_MEM && p.From.Offset == 0 {
+ xtramodes(g, r, &p.From)
+ } else if p.To.Type == obj.TYPE_MEM && p.To.Offset == 0 {
+ xtramodes(g, r, &p.To)
+ } else {
+ continue
+ }
+ }
+ }
+
+ // case ACMP:
+ // /*
+ // * elide CMP $0,x if calculation of x can set condition codes
+ // */
+ // if(isdconst(&p->from) || p->from.offset != 0)
+ // continue;
+ // r2 = r->s1;
+ // if(r2 == nil)
+ // continue;
+ // t = r2->prog->as;
+ // switch(t) {
+ // default:
+ // continue;
+ // case ABEQ:
+ // case ABNE:
+ // case ABMI:
+ // case ABPL:
+ // break;
+ // case ABGE:
+ // t = ABPL;
+ // break;
+ // case ABLT:
+ // t = ABMI;
+ // break;
+ // case ABHI:
+ // t = ABNE;
+ // break;
+ // case ABLS:
+ // t = ABEQ;
+ // break;
+ // }
+ // r1 = r;
+ // do
+ // r1 = uniqp(r1);
+ // while (r1 != nil && r1->prog->as == ANOP);
+ // if(r1 == nil)
+ // continue;
+ // p1 = r1->prog;
+ // if(p1->to.type != TYPE_REG)
+ // continue;
+ // if(p1->to.reg != p->reg)
+ // if(!(p1->as == AMOVW && p1->from.type == TYPE_REG && p1->from.reg == p->reg))
+ // continue;
+ //
+ // switch(p1->as) {
+ // default:
+ // continue;
+ // case AMOVW:
+ // if(p1->from.type != TYPE_REG)
+ // continue;
+ // case AAND:
+ // case AEOR:
+ // case AORR:
+ // case ABIC:
+ // case AMVN:
+ // case ASUB:
+ // case ARSB:
+ // case AADD:
+ // case AADC:
+ // case ASBC:
+ // case ARSC:
+ // break;
+ // }
+ // p1->scond |= C_SBIT;
+ // r2->prog->as = t;
+ // excise(r);
+ // continue;
+
+ // predicate(g);
+
+ gc.Flowend(g)
+}
+
+func regtyp(a *obj.Addr) int {
+ return bool2int(a.Type == obj.TYPE_REG && (arm.REG_R0 <= a.Reg && a.Reg <= arm.REG_R15 || arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15))
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ * MOV a, R0
+ * ADD b, R0 / no use of R1
+ * MOV R0, R1
+ * would be converted to
+ * MOV a, R1
+ * ADD b, R1
+ * MOV R1, R0
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ */
+func subprop(r0 *gc.Flow) int {
+ var p *obj.Prog
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+ var r *gc.Flow
+ var t int
+ var info gc.ProgInfo
+
+ p = r0.Prog
+ v1 = &p.From
+ if !(regtyp(v1) != 0) {
+ return 0
+ }
+ v2 = &p.To
+ if !(regtyp(v2) != 0) {
+ return 0
+ }
+ for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ if gc.Uniqs(r) == nil {
+ break
+ }
+ p = r.Prog
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+ proginfo(&info, p)
+ if info.Flags&gc.Call != 0 {
+ return 0
+ }
+
+ if (info.Flags&gc.CanRegRead != 0) && p.To.Type == obj.TYPE_REG {
+ info.Flags |= gc.RegRead
+ info.Flags &^= (gc.CanRegRead | gc.RightRead)
+ p.Reg = p.To.Reg
+ }
+
+ switch p.As {
+ case arm.AMULLU,
+ arm.AMULA,
+ arm.AMVN:
+ return 0
+ }
+
+ if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
+ if p.To.Type == v1.Type {
+ if p.To.Reg == v1.Reg {
+ if p.Scond == arm.C_SCOND_NONE {
+ goto gotit
+ }
+ }
+ }
+ }
+
+ if copyau(&p.From, v2) != 0 || copyau1(p, v2) != 0 || copyau(&p.To, v2) != 0 {
+ break
+ }
+ if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+ break
+ }
+ }
+
+ return 0
+
+gotit:
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub1(p, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t = int(v1.Reg)
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return 1
+}
+
+/*
+ * The idea is to remove redundant copies.
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * use v2 return fail
+ * -----------------
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * set v2 return success
+ */
+func copyprop(g *gc.Graph, r0 *gc.Flow) int {
+ var p *obj.Prog
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+
+ p = r0.Prog
+ v1 = &p.From
+ v2 = &p.To
+ if copyas(v1, v2) != 0 {
+ return 1
+ }
+ gactive++
+ return copy1(v1, v2, r0.S1, 0)
+}
+
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+ var t int
+ var p *obj.Prog
+
+ if uint32(r.Active) == gactive {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("act set; return 1\n")
+ }
+ return 1
+ }
+
+ r.Active = int32(gactive)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
+ }
+ for ; r != nil; r = r.S1 {
+ p = r.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ if !(f != 0) && gc.Uniqp(r) == nil {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; merge; f=%d", f)
+ }
+ }
+
+ t = copyu(p, v2, nil)
+ switch t {
+ case 2: /* rar, can't split */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %vrar; return 0\n", gc.Ctxt.Dconv(v2))
+ }
+ return 0
+
+ case 3: /* set */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %vset; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return 1
+
+ case 1, /* used, substitute */
+ 4: /* use and set */
+ if f != 0 {
+ if !(gc.Debug['P'] != 0) {
+ return 0
+ }
+ if t == 4 {
+ fmt.Printf("; %vused+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ } else {
+ fmt.Printf("; %vused and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ }
+ return 0
+ }
+
+ if copyu(p, v2, v1) != 0 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub fail; return 0\n")
+ }
+ return 0
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
+ }
+ if t == 4 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %vused+set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return 1
+ }
+ }
+
+ if !(f != 0) {
+ t = copyu(p, v1, nil)
+ if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %vset and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+ }
+ }
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n")
+ }
+ if r.S2 != nil {
+ if !(copy1(v1, v2, r.S2, f) != 0) {
+ return 0
+ }
+ }
+ }
+
+ return 1
+}
+
+// UNUSED
+/*
+ * The idea is to remove redundant constants.
+ * $c1->v1
+ * ($c1->v2 s/$c1/v1)*
+ * set v1 return
+ * The v1->v2 should be eliminated by copy propagation.
+ */
+func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
+ var p *obj.Prog
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("constprop %v->%v\n", gc.Ctxt.Dconv(c1), gc.Ctxt.Dconv(v1))
+ }
+ for ; r != nil; r = r.S1 {
+ p = r.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ if gc.Uniqp(r) == nil {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; merge; return\n")
+ }
+ return
+ }
+
+ if p.As == arm.AMOVW && copyas(&p.From, c1) != 0 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(&p.From), gc.Ctxt.Dconv(v1))
+ }
+ p.From = *v1
+ } else if copyu(p, v1, nil) > 1 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %vset; return\n", gc.Ctxt.Dconv(v1))
+ }
+ return
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n")
+ }
+ if r.S2 != nil {
+ constprop(c1, v1, r.S2)
+ }
+ }
+}
+
+/*
+ * shortprop eliminates redundant zero/sign extensions.
+ *
+ * MOVBS x, R
+ * <no use R>
+ * MOVBS R, R'
+ *
+ * changed to
+ *
+ * MOVBS x, R
+ * ...
+ * MOVB R, R' (compiled to mov)
+ *
+ * MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
+ */
+func shortprop(r *gc.Flow) int {
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var r1 *gc.Flow
+
+ p = r.Prog
+ r1 = findpre(r, &p.From)
+ if r1 == nil {
+ return 0
+ }
+
+ p1 = r1.Prog
+ if p1.As == p.As {
+ // Two consecutive extensions.
+ goto gotit
+ }
+
+ if p1.As == arm.AMOVW && isdconst(&p1.From) != 0 && p1.From.Offset >= 0 && p1.From.Offset < 128 {
+ // Loaded an immediate.
+ goto gotit
+ }
+
+ return 0
+
+gotit:
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("shortprop\n%v\n%v", p1, p)
+ }
+ switch p.As {
+ case arm.AMOVBS,
+ arm.AMOVBU:
+ p.As = arm.AMOVB
+
+ case arm.AMOVHS,
+ arm.AMOVHU:
+ p.As = arm.AMOVH
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf(" => %v\n", arm.Aconv(int(p.As)))
+ }
+ return 1
+}
+
+// UNUSED
+/*
+ * ASLL x,y,w
+ * .. (not use w, not set x y w)
+ * AXXX w,a,b (a != w)
+ * .. (not use w)
+ * (set w)
+ * ----------- changed to
+ * ..
+ * AXXX (x<<y),a,b
+ * ..
+ */
+func shiftprop(r *gc.Flow) int {
+ var r1 *gc.Flow
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var n int
+ var o int
+ var a obj.Addr
+
+ p = r.Prog
+ if p.To.Type != obj.TYPE_REG {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
+ }
+ return 0
+ }
+
+ n = int(p.To.Reg)
+ a = obj.Zprog.From
+ if p.Reg != 0 && p.Reg != p.To.Reg {
+ a.Type = obj.TYPE_REG
+ a.Reg = p.Reg
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("shiftprop\n%v", p)
+ }
+ r1 = r
+ for {
+ /* find first use of shift result; abort if shift operands or result are changed */
+ r1 = gc.Uniqs(r1)
+
+ if r1 == nil {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tbranch; FAILURE\n")
+ }
+ return 0
+ }
+
+ if gc.Uniqp(r1) == nil {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tmerge; FAILURE\n")
+ }
+ return 0
+ }
+
+ p1 = r1.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n%v", p1)
+ }
+ switch copyu(p1, &p.To, nil) {
+ case 0: /* not used or set */
+ if (p.From.Type == obj.TYPE_REG && copyu(p1, &p.From, nil) > 1) || (a.Type == obj.TYPE_REG && copyu(p1, &a, nil) > 1) {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\targs modified; FAILURE\n")
+ }
+ return 0
+ }
+
+ continue
+ case 3: /* set, not used */
+ {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tBOTCH: noref; FAILURE\n")
+ }
+ return 0
+ }
+ }
+
+ break
+ }
+
+ /* check whether substitution can be done */
+ switch p1.As {
+ default:
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tnon-dpi; FAILURE\n")
+ }
+ return 0
+
+ case arm.AAND,
+ arm.AEOR,
+ arm.AADD,
+ arm.AADC,
+ arm.AORR,
+ arm.ASUB,
+ arm.ASBC,
+ arm.ARSB,
+ arm.ARSC:
+ if int(p1.Reg) == n || (p1.Reg == 0 && p1.To.Type == obj.TYPE_REG && int(p1.To.Reg) == n) {
+ if p1.From.Type != obj.TYPE_REG {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tcan't swap; FAILURE\n")
+ }
+ return 0
+ }
+
+ p1.Reg = p1.From.Reg
+ p1.From.Reg = int16(n)
+ switch p1.As {
+ case arm.ASUB:
+ p1.As = arm.ARSB
+
+ case arm.ARSB:
+ p1.As = arm.ASUB
+
+ case arm.ASBC:
+ p1.As = arm.ARSC
+
+ case arm.ARSC:
+ p1.As = arm.ASBC
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\t=>%v", p1)
+ }
+ }
+ fallthrough
+
+ case arm.ABIC,
+ arm.ATST,
+ arm.ACMP,
+ arm.ACMN:
+ if int(p1.Reg) == n {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tcan't swap; FAILURE\n")
+ }
+ return 0
+ }
+
+ if p1.Reg == 0 && int(p1.To.Reg) == n {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tshift result used twice; FAILURE\n")
+ }
+ return 0
+ }
+
+ // case AMVN:
+ if p1.From.Type == obj.TYPE_SHIFT {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tshift result used in shift; FAILURE\n")
+ }
+ return 0
+ }
+
+ if p1.From.Type != obj.TYPE_REG || int(p1.From.Reg) != n {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
+ }
+ return 0
+ }
+ }
+
+ /* check whether shift result is used subsequently */
+ p2 = p1
+
+ if int(p1.To.Reg) != n {
+ for {
+ r1 = gc.Uniqs(r1)
+ if r1 == nil {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tinconclusive; FAILURE\n")
+ }
+ return 0
+ }
+
+ p1 = r1.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n%v", p1)
+ }
+ switch copyu(p1, &p.To, nil) {
+ case 0: /* not used or set */
+ continue
+
+ case 3: /* set, not used */
+ break
+
+ default: /* used */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\treused; FAILURE\n")
+ }
+ return 0
+ }
+
+ break
+ }
+ }
+
+ /* make the substitution */
+ p2.From.Reg = 0
+
+ o = int(p.Reg)
+ if o == 0 {
+ o = int(p.To.Reg)
+ }
+ o &= 15
+
+ switch p.From.Type {
+ case obj.TYPE_CONST:
+ o |= int((p.From.Offset & 0x1f) << 7)
+
+ case obj.TYPE_REG:
+ o |= 1<<4 | (int(p.From.Reg)&15)<<8
+ }
+
+ switch p.As {
+ case arm.ASLL:
+ o |= 0 << 5
+
+ case arm.ASRL:
+ o |= 1 << 5
+
+ case arm.ASRA:
+ o |= 2 << 5
+ }
+
+ p2.From = obj.Zprog.From
+ p2.From.Type = obj.TYPE_SHIFT
+ p2.From.Offset = int64(o)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\t=>%v\tSUCCEED\n", p2)
+ }
+ return 1
+}
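+
+// The offset word assembled above follows the ARM shift-operand
+// layout also used by gshift and gregshift: bits 0-3 name the shifted
+// register, bits 5-6 select the shift kind (SLL=0, SRL=1, SRA=2), and
+// the count is either an immediate in bits 7-11 or, with bit 4 set, a
+// register in bits 8-11.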
+
+/*
+ * findpre returns the last instruction mentioning v
+ * before r. It must be a set, and there must be
+ * a unique path from that instruction to r.
+ */
+func findpre(r *gc.Flow, v *obj.Addr) *gc.Flow {
+ var r1 *gc.Flow
+
+	for r1 = gc.Uniqp(r); r1 != nil; r, r1 = r1, gc.Uniqp(r1) {
+ if gc.Uniqs(r1) != r {
+ return nil
+ }
+ switch copyu(r1.Prog, v, nil) {
+ case 1, /* used */
+ 2: /* read-alter-rewrite */
+ return nil
+
+ case 3, /* set */
+ 4: /* set and used */
+ return r1
+ }
+ }
+
+ return nil
+}
+
+/*
+ * findinc finds ADD instructions with a constant
+ * argument which falls within the immed_12 range.
+ */
+func findinc(r *gc.Flow, r2 *gc.Flow, v *obj.Addr) *gc.Flow {
+ var r1 *gc.Flow
+ var p *obj.Prog
+
+	for r1 = gc.Uniqs(r); r1 != nil && r1 != r2; r, r1 = r1, gc.Uniqs(r1) {
+ if gc.Uniqp(r1) != r {
+ return nil
+ }
+ switch copyu(r1.Prog, v, nil) {
+ case 0: /* not touched */
+ continue
+
+ case 4: /* set and used */
+ p = r1.Prog
+
+ if p.As == arm.AADD {
+ if isdconst(&p.From) != 0 {
+ if p.From.Offset > -4096 && p.From.Offset < 4096 {
+ return r1
+ }
+ }
+ }
+ fallthrough
+
+ default:
+ return nil
+ }
+ }
+
+ return nil
+}
+
+func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) int {
+ var a [3]obj.Addr
+ var i int
+ var n int
+
+ if r == r2 {
+ return 1
+ }
+ n = 0
+ if p.Reg != 0 && p.Reg != p.To.Reg {
+ a[n].Type = obj.TYPE_REG
+ a[n].Reg = p.Reg
+ n++
+ }
+
+ switch p.From.Type {
+ case obj.TYPE_SHIFT:
+ a[n].Type = obj.TYPE_REG
+ a[n].Reg = int16(arm.REG_R0 + (p.From.Offset & 0xf))
+ n++
+ fallthrough
+
+ case obj.TYPE_REG:
+ a[n].Type = obj.TYPE_REG
+ a[n].Reg = p.From.Reg
+ n++
+ }
+
+ if n == 0 {
+ return 1
+ }
+ for ; r != nil && r != r2; r = gc.Uniqs(r) {
+ p = r.Prog
+ for i = 0; i < n; i++ {
+ if copyu(p, &a[i], nil) > 1 {
+ return 0
+ }
+ }
+ }
+
+ return 1
+}
+
+func findu1(r *gc.Flow, v *obj.Addr) int {
+ for ; r != nil; r = r.S1 {
+ if r.Active != 0 {
+ return 0
+ }
+ r.Active = 1
+ switch copyu(r.Prog, v, nil) {
+ case 1, /* used */
+ 2, /* read-alter-rewrite */
+ 4: /* set and used */
+ return 1
+
+ case 3: /* set */
+ return 0
+ }
+
+ if r.S2 != nil {
+ if findu1(r.S2, v) != 0 {
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) int {
+ var r1 *gc.Flow
+
+ for r1 = g.Start; r1 != nil; r1 = r1.Link {
+ r1.Active = 0
+ }
+ return findu1(r, v)
+}
+
+/*
+ * xtramodes enables the ARM post increment and
+ * shift offset addressing modes to transform
+ * MOVW 0(R3),R1
+ * ADD $4,R3,R3
+ * into
+ * MOVW.P 4(R3),R1
+ * and
+ * ADD R0,R1
+ * MOVBU 0(R1),R0
+ * into
+ * MOVBU R0<<0(R1),R0
+ */
+func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) int {
+ var r1 *gc.Flow
+ var r2 *gc.Flow
+ var r3 *gc.Flow
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var v obj.Addr
+
+ p = r.Prog
+ v = *a
+ v.Type = obj.TYPE_REG
+ r1 = findpre(r, &v)
+ if r1 != nil {
+ p1 = r1.Prog
+ if p1.To.Type == obj.TYPE_REG && p1.To.Reg == v.Reg {
+ switch p1.As {
+ case arm.AADD:
+ if p1.Scond&arm.C_SBIT != 0 {
+ // avoid altering ADD.S/ADC sequences.
+ break
+ }
+
+ if p1.From.Type == obj.TYPE_REG || (p1.From.Type == obj.TYPE_SHIFT && p1.From.Offset&(1<<4) == 0 && ((p.As != arm.AMOVB && p.As != arm.AMOVBS) || (a == &p.From && p1.From.Offset&^0xf == 0))) || ((p1.From.Type == obj.TYPE_ADDR || p1.From.Type == obj.TYPE_CONST) && p1.From.Offset > -4096 && p1.From.Offset < 4096) {
+ if nochange(gc.Uniqs(r1), r, p1) != 0 {
+ if a != &p.From || v.Reg != p.To.Reg {
+ if finduse(g, r.S1, &v) != 0 {
+ if p1.Reg == 0 || p1.Reg == v.Reg {
+ /* pre-indexing */
+ p.Scond |= arm.C_WBIT
+ } else {
+ return 0
+ }
+ }
+ }
+
+ switch p1.From.Type {
+ /* register offset */
+ case obj.TYPE_REG:
+ if gc.Nacl {
+ return 0
+ }
+ *a = obj.Zprog.From
+ a.Type = obj.TYPE_SHIFT
+ a.Offset = int64(p1.From.Reg) & 15
+
+ /* scaled register offset */
+ case obj.TYPE_SHIFT:
+ if gc.Nacl {
+ return 0
+ }
+ *a = obj.Zprog.From
+ a.Type = obj.TYPE_SHIFT
+ fallthrough
+
+ /* immediate offset */
+ case obj.TYPE_CONST,
+ obj.TYPE_ADDR:
+ a.Offset = p1.From.Offset
+ }
+
+ if p1.Reg != 0 {
+ a.Reg = p1.Reg
+ }
+ excise(r1)
+ return 1
+ }
+ }
+
+ case arm.AMOVW:
+ if p1.From.Type == obj.TYPE_REG {
+ r2 = findinc(r1, r, &p1.From)
+ if r2 != nil {
+ for r3 = gc.Uniqs(r2); r3.Prog.As == obj.ANOP; r3 = gc.Uniqs(r3) {
+ }
+ if r3 == r {
+ /* post-indexing */
+ p1 = r2.Prog
+
+ a.Reg = p1.To.Reg
+ a.Offset = p1.From.Offset
+ p.Scond |= arm.C_PBIT
+ if !(finduse(g, r, &r1.Prog.To) != 0) {
+ excise(r1)
+ }
+ excise(r2)
+ return 1
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if a != &p.From || a.Reg != p.To.Reg {
+ r1 = findinc(r, nil, &v)
+ if r1 != nil {
+ /* post-indexing */
+ p1 = r1.Prog
+
+ a.Offset = p1.From.Offset
+ p.Scond |= arm.C_PBIT
+ excise(r1)
+ return 1
+ }
+ }
+
+ return 0
+}
+
+/*
+ * return
+ * 1 if v only used (and substitute),
+ * 2 if read-alter-rewrite
+ * 3 if set
+ * 4 if set and used
+ * 0 otherwise (not touched)
+ */
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+ switch p.As {
+ default:
+ fmt.Printf("copyu: can't find %v\n", arm.Aconv(int(p.As)))
+ return 2
+
+ case arm.AMOVM:
+ if v.Type != obj.TYPE_REG {
+ return 0
+ }
+ if p.From.Type == obj.TYPE_CONST { /* read reglist, read/rar */
+ if s != nil {
+ if p.From.Offset&(1<<uint(v.Reg)) != 0 {
+ return 1
+ }
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) != 0 {
+ if p.Scond&arm.C_WBIT != 0 {
+ return 2
+ }
+ return 1
+ }
+
+ if p.From.Offset&(1<<uint(v.Reg)) != 0 {
+ return 1 /* read/rar, write reglist */
+ }
+ } else {
+ if s != nil {
+ if p.To.Offset&(1<<uint(v.Reg)) != 0 {
+ return 1
+ }
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.From, v) != 0 {
+ if p.Scond&arm.C_WBIT != 0 {
+ return 2
+ }
+ if p.To.Offset&(1<<uint(v.Reg)) != 0 {
+ return 4
+ }
+ return 1
+ }
+
+ if p.To.Offset&(1<<uint(v.Reg)) != 0 {
+ return 3
+ }
+ }
+
+ return 0
+
+ case obj.ANOP, /* read,, write */
+ arm.AMOVW,
+ arm.AMOVF,
+ arm.AMOVD,
+ arm.AMOVH,
+ arm.AMOVHS,
+ arm.AMOVHU,
+ arm.AMOVB,
+ arm.AMOVBS,
+ arm.AMOVBU,
+ arm.AMOVFW,
+ arm.AMOVWF,
+ arm.AMOVDW,
+ arm.AMOVWD,
+ arm.AMOVFD,
+ arm.AMOVDF:
+ if p.Scond&(arm.C_WBIT|arm.C_PBIT) != 0 {
+ if v.Type == obj.TYPE_REG {
+ if p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_SHIFT {
+ if p.From.Reg == v.Reg {
+ return 2
+ }
+ } else {
+ if p.To.Reg == v.Reg {
+ return 2
+ }
+ }
+ }
+ }
+
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ if !(copyas(&p.To, v) != 0) {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ }
+ return 0
+ }
+
+ if copyas(&p.To, v) != 0 {
+ if p.Scond != arm.C_SCOND_NONE {
+ return 2
+ }
+ if copyau(&p.From, v) != 0 {
+ return 4
+ }
+ return 3
+ }
+
+ if copyau(&p.From, v) != 0 {
+ return 1
+ }
+ if copyau(&p.To, v) != 0 {
+ return 1
+ }
+ return 0
+
+ case arm.AMULLU, /* read, read, write, write */
+ arm.AMULL,
+ arm.AMULA,
+ arm.AMVN:
+ return 2
+
+ case arm.AADD, /* read, read, write */
+ arm.AADC,
+ arm.ASUB,
+ arm.ASBC,
+ arm.ARSB,
+ arm.ASLL,
+ arm.ASRL,
+ arm.ASRA,
+ arm.AORR,
+ arm.AAND,
+ arm.AEOR,
+ arm.AMUL,
+ arm.AMULU,
+ arm.ADIV,
+ arm.ADIVU,
+ arm.AMOD,
+ arm.AMODU,
+ arm.AADDF,
+ arm.AADDD,
+ arm.ASUBF,
+ arm.ASUBD,
+ arm.AMULF,
+ arm.AMULD,
+ arm.ADIVF,
+ arm.ADIVD,
+ obj.ACHECKNIL,
+ /* read */
+ arm.ACMPF, /* read, read, */
+ arm.ACMPD,
+ arm.ACMP,
+ arm.ACMN,
+ arm.ACASE,
+ arm.ATST:
+ /* read,, */
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ if copysub1(p, v, s, 1) != 0 {
+ return 1
+ }
+ if !(copyas(&p.To, v) != 0) {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ }
+ return 0
+ }
+
+ if copyas(&p.To, v) != 0 {
+ if p.Scond != arm.C_SCOND_NONE {
+ return 2
+ }
+ if p.Reg == 0 {
+ p.Reg = p.To.Reg
+ }
+ if copyau(&p.From, v) != 0 {
+ return 4
+ }
+ if copyau1(p, v) != 0 {
+ return 4
+ }
+ return 3
+ }
+
+ if copyau(&p.From, v) != 0 {
+ return 1
+ }
+ if copyau1(p, v) != 0 {
+ return 1
+ }
+ if copyau(&p.To, v) != 0 {
+ return 1
+ }
+ return 0
+
+ case arm.ABEQ, /* read, read */
+ arm.ABNE,
+ arm.ABCS,
+ arm.ABHS,
+ arm.ABCC,
+ arm.ABLO,
+ arm.ABMI,
+ arm.ABPL,
+ arm.ABVS,
+ arm.ABVC,
+ arm.ABHI,
+ arm.ABLS,
+ arm.ABGE,
+ arm.ABLT,
+ arm.ABGT,
+ arm.ABLE:
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ return copysub1(p, v, s, 1)
+ }
+
+ if copyau(&p.From, v) != 0 {
+ return 1
+ }
+ if copyau1(p, v) != 0 {
+ return 1
+ }
+ return 0
+
+ case arm.AB: /* funny */
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) != 0 {
+ return 1
+ }
+ return 0
+
+ case obj.ARET: /* funny */
+ if s != nil {
+ return 1
+ }
+ return 3
+
+ case arm.ABL: /* funny */
+ if v.Type == obj.TYPE_REG {
+ // TODO(rsc): REG_R0 and REG_F0 used to be
+ // (when register numbers started at 0) exregoffset and exfregoffset,
+ // which are unset entirely.
+ // It's strange that this handles R0 and F0 differently from the other
+ // registers. Possible failure to optimize?
+ if arm.REG_R0 < v.Reg && v.Reg <= arm.REGEXT {
+ return 2
+ }
+ if v.Reg == arm.REGARG {
+ return 2
+ }
+ if arm.REG_F0 < v.Reg && v.Reg <= arm.FREGEXT {
+ return 2
+ }
+ }
+
+ if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
+ return 2
+ }
+
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) != 0 {
+ return 4
+ }
+ return 3
+
+ // R0 is zero, used by DUFFZERO, cannot be substituted.
+ // R1 is ptr to memory, used and set, cannot be substituted.
+ case obj.ADUFFZERO:
+ if v.Type == obj.TYPE_REG {
+ if v.Reg == REGALLOC_R0 {
+ return 1
+ }
+ if v.Reg == REGALLOC_R0+1 {
+ return 2
+ }
+ }
+
+ return 0
+
+ // R0 is scratch, set by DUFFCOPY, cannot be substituted.
+// R1, R2 are ptr to src, dst, used and set, cannot be substituted.
+ case obj.ADUFFCOPY:
+ if v.Type == obj.TYPE_REG {
+ if v.Reg == REGALLOC_R0 {
+ return 3
+ }
+ if v.Reg == REGALLOC_R0+1 || v.Reg == REGALLOC_R0+2 {
+ return 2
+ }
+ }
+
+ return 0
+
+ case obj.ATEXT: /* funny */
+ if v.Type == obj.TYPE_REG {
+ if v.Reg == arm.REGARG {
+ return 3
+ }
+ }
+ return 0
+
+ case obj.APCDATA,
+ obj.AFUNCDATA,
+ obj.AVARDEF,
+ obj.AVARKILL:
+ return 0
+ }
+}
+
+/*
+ * direct reference,
+ * could be set/use depending on
+ * semantics
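+ * Returns 1 when a is exactly the same register as v (or, for
+ * constant propagation, the same constant operand), 0 otherwise.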
+ */
+func copyas(a *obj.Addr, v *obj.Addr) int {
+ if regtyp(v) != 0 {
+ if a.Type == v.Type {
+ if a.Reg == v.Reg {
+ return 1
+ }
+ }
+ } else if v.Type == obj.TYPE_CONST { /* for constprop */
+ if a.Type == v.Type {
+ if a.Name == v.Name {
+ if a.Sym == v.Sym {
+ if a.Reg == v.Reg {
+ if a.Offset == v.Offset {
+ return 1
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return 0
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) int {
+ if a.Type != v.Type {
+ return 0
+ }
+ if regtyp(v) != 0 && a.Reg == v.Reg {
+ return 1
+ }
+
+ // TODO(rsc): Change v->type to v->name and enable.
+ //if(v->type == NAME_AUTO || v->type == NAME_PARAM) {
+ // if(v->offset == a->offset)
+ // return 1;
+ //}
+ return 0
+}
+
+/*
+ * either direct or indirect
+ */
+func copyau(a *obj.Addr, v *obj.Addr) int {
+ if copyas(a, v) != 0 {
+ return 1
+ }
+ if v.Type == obj.TYPE_REG {
+ if a.Type == obj.TYPE_ADDR && a.Reg != 0 {
+ if a.Reg == v.Reg {
+ return 1
+ }
+ } else if a.Type == obj.TYPE_MEM {
+ if a.Reg == v.Reg {
+ return 1
+ }
+ } else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
+ if a.Reg == v.Reg {
+ return 1
+ }
+ if a.Offset == int64(v.Reg) {
+ return 1
+ }
+ } else if a.Type == obj.TYPE_SHIFT {
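+ // A shift operand packs its registers into Offset: bits 0-3
+ // hold the shifted register, bit 4 is set when the shift count
+ // comes from a register, and bits 8-11 hold that count register
+ // (both numbered from R0). For example, R1<<R2 keeps 1 in
+ // bits 0-3, sets bit 4, and keeps 2 in bits 8-11.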
+ if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
+ return 1
+ }
+ if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+/*
+ * compare v to the center
+ * register in p (p.Reg)
+ */
+func copyau1(p *obj.Prog, v *obj.Addr) int {
+ if v.Type == obj.TYPE_REG && v.Reg == 0 {
+ return 0
+ }
+ return bool2int(p.Reg == v.Reg)
+}
+
+/*
+ * substitute s for v in a
+ * return failure to substitute
+ */
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+ if f != 0 {
+ if copyau(a, v) != 0 {
+ if a.Type == obj.TYPE_SHIFT {
+ if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
+ a.Offset = a.Offset&^0xf | int64(s.Reg)&0xf
+ }
+ if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
+ a.Offset = a.Offset&^(0xf<<8) | (int64(s.Reg)&0xf)<<8
+ }
+ } else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
+ if a.Offset == int64(v.Reg) {
+ a.Offset = int64(s.Reg)
+ }
+ if a.Reg == v.Reg {
+ a.Reg = s.Reg
+ }
+ } else {
+ a.Reg = s.Reg
+ }
+ }
+ }
+
+ return 0
+}
+
+func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
+ if f != 0 {
+ if copyau1(p1, v) != 0 {
+ p1.Reg = s.Reg
+ }
+ }
+ return 0
+}
+
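+// predinfo maps each conditional branch to its inverse and to the
+// ARM 4-bit condition-code values used when predicating the
+// instructions it guards. For example, ABEQ guards its chain with
+// EQ (0x0) on the true arm and NE (0x1) on the false arm.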
+var predinfo = []struct {
+ opcode    int
+ notopcode int
+ scond     int
+ notscond  int
+}{
+ {arm.ABEQ, arm.ABNE, 0x0, 0x1},
+ {arm.ABNE, arm.ABEQ, 0x1, 0x0},
+ {arm.ABCS, arm.ABCC, 0x2, 0x3},
+ {arm.ABHS, arm.ABLO, 0x2, 0x3},
+ {arm.ABCC, arm.ABCS, 0x3, 0x2},
+ {arm.ABLO, arm.ABHS, 0x3, 0x2},
+ {arm.ABMI, arm.ABPL, 0x4, 0x5},
+ {arm.ABPL, arm.ABMI, 0x5, 0x4},
+ {arm.ABVS, arm.ABVC, 0x6, 0x7},
+ {arm.ABVC, arm.ABVS, 0x7, 0x6},
+ {arm.ABHI, arm.ABLS, 0x8, 0x9},
+ {arm.ABLS, arm.ABHI, 0x9, 0x8},
+ {arm.ABGE, arm.ABLT, 0xA, 0xB},
+ {arm.ABLT, arm.ABGE, 0xB, 0xA},
+ {arm.ABGT, arm.ABLE, 0xC, 0xD},
+ {arm.ABLE, arm.ABGT, 0xD, 0xC},
+}
+
+type Joininfo struct {
+ start *gc.Flow
+ last *gc.Flow
+ end *gc.Flow
+ len int
+}
+
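+// Outcomes of joinsplit: the reason a chain of potentially
+// predicable instructions ended (control-flow join or split, end of
+// function, unconditional branch, an instruction that writes the
+// condition codes, or the four-instruction cap).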
+const (
+ Join = iota
+ Split
+ End
+ Branch
+ Setcond
+ Toolong
+)
+
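+// Arguments to applypred: which sense of the branch condition to
+// apply, and whether the trailing branch is deleted or kept.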
+const (
+ Falsecond = iota
+ Truecond
+ Delbranch
+ Keepbranch
+)
+
+func isbranch(p *obj.Prog) int {
+ return bool2int((arm.ABEQ <= p.As) && (p.As <= arm.ABLE))
+}
+
+func predicable(p *obj.Prog) int {
+ switch p.As {
+ case obj.ANOP,
+ obj.AXXX,
+ obj.ADATA,
+ obj.AGLOBL,
+ obj.ATEXT,
+ arm.AWORD,
+ arm.ABCASE,
+ arm.ACASE:
+ return 0
+ }
+
+ if isbranch(p) != 0 {
+ return 0
+ }
+ return 1
+}
+
+/*
+ * Depends on an analysis of the encodings performed by 5l.
+ * These seem to be all of the opcodes that lead to the "S" bit
+ * being set in the instruction encodings.
+ *
+ * C_SBIT may also have been set explicitly in p->scond.
+ */
+func modifiescpsr(p *obj.Prog) int {
+ switch p.As {
+ case arm.AMULLU,
+ arm.AMULA,
+ arm.AMULU,
+ arm.ADIVU,
+ arm.ATEQ,
+ arm.ACMN,
+ arm.ATST,
+ arm.ACMP,
+ arm.AMUL,
+ arm.ADIV,
+ arm.AMOD,
+ arm.AMODU,
+ arm.ABL:
+ return 1
+ }
+
+ if p.Scond&arm.C_SBIT != 0 {
+ return 1
+ }
+ return 0
+}
+
+/*
+ * Find the maximal chain of instructions starting with r which could
+ * be executed conditionally
+ */
+func joinsplit(r *gc.Flow, j *Joininfo) int {
+ j.start = r
+ j.last = r
+ j.len = 0
+ for {
+ if r.P2 != nil && (r.P1 != nil || r.P2.P2link != nil) {
+ j.end = r
+ return Join
+ }
+
+ if r.S1 != nil && r.S2 != nil {
+ j.end = r
+ return Split
+ }
+
+ j.last = r
+ if r.Prog.As != obj.ANOP {
+ j.len++
+ }
+ if !(r.S1 != nil) && !(r.S2 != nil) {
+ j.end = r.Link
+ return End
+ }
+
+ if r.S2 != nil {
+ j.end = r.S2
+ return Branch
+ }
+
+ if modifiescpsr(r.Prog) != 0 {
+ j.end = r.S1
+ return Setcond
+ }
+
+ r = r.S1
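+ // Stop after four instructions; longer chains are
+ // reported as Toolong and left unpredicated.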
+ if !(j.len < 4) {
+ break
+ }
+ }
+
+ j.end = r
+ return Toolong
+}
+
+func successor(r *gc.Flow) *gc.Flow {
+ if r.S1 != nil {
+ return r.S1
+ } else {
+ return r.S2
+ }
+}
+
+func applypred(rstart *gc.Flow, j *Joininfo, cond int, branch int) {
+ var pred int
+ var r *gc.Flow
+
+ if j.len == 0 {
+ return
+ }
+ if cond == Truecond {
+ pred = predinfo[rstart.Prog.As-arm.ABEQ].scond
+ } else {
+ pred = predinfo[rstart.Prog.As-arm.ABEQ].notscond
+ }
+
+ for r = j.start; ; r = successor(r) {
+ if r.Prog.As == arm.AB {
+ if r != j.last || branch == Delbranch {
+ excise(r)
+ } else {
+ if cond == Truecond {
+ r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].opcode)
+ } else {
+ r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].notopcode)
+ }
+ }
+ } else if predicable(r.Prog) != 0 {
+ r.Prog.Scond = uint8(int(r.Prog.Scond&^arm.C_SCOND) | pred)
+ }
+ if r.S1 != r.Link {
+ r.S1 = r.Link
+ r.Link.P1 = r
+ }
+
+ if r == j.last {
+ break
+ }
+ }
+}
+
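+/*
+ * Walk the flow graph and, where a conditional branch guards a short
+ * run of predicable instructions, convert that run into conditionally
+ * executed instructions and remove the branch when possible.
+ */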
+func predicate(g *gc.Graph) {
+ var r *gc.Flow
+ var t1 int
+ var t2 int
+ var j1 Joininfo
+ var j2 Joininfo
+
+ for r = g.Start; r != nil; r = r.Link {
+ if isbranch(r.Prog) != 0 {
+ t1 = joinsplit(r.S1, &j1)
+ t2 = joinsplit(r.S2, &j2)
+ if j1.last.Link != j2.start {
+ continue
+ }
+ if j1.end == j2.end {
+ if (t1 == Branch && (t2 == Join || t2 == Setcond)) || (t2 == Join && (t1 == Join || t1 == Setcond)) {
+ applypred(r, &j1, Falsecond, Delbranch)
+ applypred(r, &j2, Truecond, Delbranch)
+ excise(r)
+ continue
+ }
+ }
+
+ if t1 == End || t1 == Branch {
+ applypred(r, &j1, Falsecond, Keepbranch)
+ excise(r)
+ continue
+ }
+ }
+ }
+}
+
+func isdconst(a *obj.Addr) int {
+ return bool2int(a.Type == obj.TYPE_CONST)
+}
+
+func isfloatreg(a *obj.Addr) int {
+ return bool2int(arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15)
+}
+
+func stackaddr(a *obj.Addr) int {
+ return bool2int(regtyp(a) != 0 && a.Reg == arm.REGSP)
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) int {
+ return bool2int(reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096)
+}
+
+func excise(r *gc.Flow) {
+ var p *obj.Prog
+
+ p = r.Prog
+ obj.Nopout(p)
+}
diff --git a/src/cmd/new5g/prog.go b/src/cmd/new5g/prog.go
new file mode 100644
index 0000000000..3f7715f1fc
--- /dev/null
+++ b/src/cmd/new5g/prog.go
@@ -0,0 +1,163 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+import "cmd/internal/gc"
+
+const (
+ RightRdwr = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about the instructions
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [arm.ALAST]gc.ProgInfo{
+ obj.ATYPE: gc.ProgInfo{gc.Pseudo | gc.Skip, 0, 0, 0},
+ obj.ATEXT: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AFUNCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.APCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AUNDEF: gc.ProgInfo{gc.Break, 0, 0, 0},
+ obj.AUSEFIELD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ obj.ACHECKNIL: gc.ProgInfo{gc.LeftRead, 0, 0, 0},
+ obj.AVARDEF: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+ obj.AVARKILL: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+ // NOP is an internal no-op that also stands
+ // for USED and SET annotations, not the Intel opcode.
+ obj.ANOP: gc.ProgInfo{gc.LeftRead | gc.RightWrite, 0, 0, 0},
+
+ // Integer.
+ arm.AADC: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AADD: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AAND: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ABIC: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ACMN: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ arm.ACMP: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ arm.ADIVU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ADIV: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AEOR: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMODU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMOD: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMULALU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr, 0, 0, 0},
+ arm.AMULAL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr, 0, 0, 0},
+ arm.AMULA: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr, 0, 0, 0},
+ arm.AMULU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMUL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMULL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMULLU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMVN: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite, 0, 0, 0},
+ arm.AORR: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ARSB: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ARSC: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ASBC: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ASLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ASRA: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ASRL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ASUB: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ATEQ: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ arm.ATST: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+
+ // Floating point.
+ arm.AADDD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.AADDF: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.ACMPD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ arm.ACMPF: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ arm.ADIVD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.ADIVF: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.AMULD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.AMULF: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.ASUBD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.ASUBF: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+
+ // Conversions.
+ arm.AMOVWD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVWF: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVDF: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVDW: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVFD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVFW: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+ // Moves.
+ arm.AMOVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ arm.AMOVD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ arm.AMOVF: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ arm.AMOVH: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ arm.AMOVW: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+ // In addition, duffzero reads R0,R1 and writes R1. This fact is
+ // encoded in peep.go
+ obj.ADUFFZERO: gc.ProgInfo{gc.Call, 0, 0, 0},
+
+ // In addition, duffcopy reads R1,R2 and writes R0,R1,R2. This fact is
+ // encoded in peep.go
+ obj.ADUFFCOPY: gc.ProgInfo{gc.Call, 0, 0, 0},
+
+ // These should be split into the two different conversions instead
+ // of overloading the one.
+ arm.AMOVBS: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVBU: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVHS: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVHU: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+ // Jumps.
+ arm.AB: gc.ProgInfo{gc.Jump | gc.Break, 0, 0, 0},
+ arm.ABL: gc.ProgInfo{gc.Call, 0, 0, 0},
+ arm.ABEQ: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABNE: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABCS: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABHS: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABCC: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABLO: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABMI: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABPL: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABVS: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABVC: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABHI: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABLS: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABGE: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABLT: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABGT: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABLE: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ obj.ARET: gc.ProgInfo{gc.Break, 0, 0, 0},
+}
+
+func proginfo(info *gc.ProgInfo, p *obj.Prog) {
+ *info = progtable[p.As]
+ if info.Flags == 0 {
+ gc.Fatal("unknown instruction %v", p)
+ }
+
+ if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
+ info.Flags &^= gc.LeftRead
+ info.Flags |= gc.LeftAddr
+ }
+
+ if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
+ info.Flags &^= gc.RegRead
+ info.Flags |= gc.CanRegRead | gc.RightRead
+ }
+
+ if (p.Scond&arm.C_SCOND != arm.C_SCOND_NONE) && (info.Flags&gc.RightWrite != 0) {
+ info.Flags |= gc.RightRead
+ }
+
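+ // Div and mod expand into code that uses R12, so record R12
+ // in the instruction's register set.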
+ switch p.As {
+ case arm.ADIV,
+ arm.ADIVU,
+ arm.AMOD,
+ arm.AMODU:
+ info.Regset |= RtoB(arm.REG_R12)
+ }
+}
diff --git a/src/cmd/new5g/reg.go b/src/cmd/new5g/reg.go
new file mode 100644
index 0000000000..2afdf12416
--- /dev/null
+++ b/src/cmd/new5g/reg.go
@@ -0,0 +1,136 @@
+// Inferno utils/5c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import "cmd/internal/obj/arm"
+import "cmd/internal/gc"
+
+const (
+ NREGVAR = 32
+)
+
+var regname = []string{
+ ".R0",
+ ".R1",
+ ".R2",
+ ".R3",
+ ".R4",
+ ".R5",
+ ".R6",
+ ".R7",
+ ".R8",
+ ".R9",
+ ".R10",
+ ".R11",
+ ".R12",
+ ".R13",
+ ".R14",
+ ".R15",
+ ".F0",
+ ".F1",
+ ".F2",
+ ".F3",
+ ".F4",
+ ".F5",
+ ".F6",
+ ".F7",
+ ".F8",
+ ".F9",
+ ".F10",
+ ".F11",
+ ".F12",
+ ".F13",
+ ".F14",
+ ".F15",
+}
+
+func regnames(n *int) []string {
+ *n = NREGVAR
+ return regname
+}
+
+func excludedregs() uint64 {
+ return RtoB(arm.REGSP) | RtoB(arm.REGLINK) | RtoB(arm.REGPC)
+}
+
+func doregbits(r int) uint64 {
+ return 0
+}
+
+/*
+ * bit reg
+ * 0 R0
+ * 1 R1
+ * ... ...
+ * 10 R10
+ * 12 R12
+ *
+ * bit reg
+ * 18 F2
+ * 19 F3
+ * ... ...
+ * 31 F15
+ */
+func RtoB(r int) uint64 {
+ if arm.REG_R0 <= r && r <= arm.REG_R15 {
+ if r >= arm.REGTMP-2 && r != arm.REG_R12 { // R9 and R10 are reserved for m and g; R12 stays allocatable
+ return 0
+ }
+ return 1 << uint(r-arm.REG_R0)
+ }
+
+ if arm.REG_F0 <= r && r <= arm.REG_F15 {
+ if r < arm.REG_F2 || r > arm.REG_F0+arm.NFREG-1 {
+ return 0
+ }
+ return 1 << uint((r-arm.REG_F0)+16)
+ }
+
+ return 0
+}
+
+func BtoR(b uint64) int {
+ // TODO Allow R0 and R1, but be careful with a 0 return
+ // TODO Allow R9. Only R10 is reserved now (just g, not m).
+ b &= 0x11fc // R9 and R10 are reserved for m and g; R12 stays allocatable
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + arm.REG_R0
+}
+
+func BtoF(b uint64) int {
+ b &= 0xfffc0000
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) - 16 + arm.REG_F0
+}
diff --git a/src/cmd/new5g/util.go b/src/cmd/new5g/util.go
new file mode 100644
index 0000000000..bb5eedb15a
--- /dev/null
+++ b/src/cmd/new5g/util.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func bool2int(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
diff --git a/src/cmd/new6g/cgen.go b/src/cmd/new6g/cgen.go
new file mode 100644
index 0000000000..83c063e3d2
--- /dev/null
+++ b/src/cmd/new6g/cgen.go
@@ -0,0 +1,1889 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+/*
+ * generate:
+ * res = n;
+ * simplifies and calls gmove.
+ */
+func cgen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var a int
+ var f int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var addr obj.Addr
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\ncgen-n", n)
+ gc.Dump("cgen-res", res)
+ }
+
+ if n == nil || n.Type == nil {
+ goto ret
+ }
+
+ if res == nil || res.Type == nil {
+ gc.Fatal("cgen: res nil")
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ switch n.Op {
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ if res.Op != gc.ONAME || !(res.Addable != 0) {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_slice(n, res)
+ }
+ goto ret
+
+ case gc.OEFACE:
+ if res.Op != gc.ONAME || !(res.Addable != 0) {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_eface(n, res)
+ }
+ goto ret
+ }
+
+ if n.Ullman >= gc.UINF {
+ if n.Op == gc.OINDREG {
+ gc.Fatal("cgen: this is going to misscompile")
+ }
+ if res.Ullman >= gc.UINF {
+ gc.Tempname(&n1, n.Type)
+ cgen(n, &n1)
+ cgen(&n1, res)
+ goto ret
+ }
+ }
+
+ if gc.Isfat(n.Type) != 0 {
+ if n.Type.Width < 0 {
+ gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
+ }
+ sgen(n, res, n.Type.Width)
+ goto ret
+ }
+
+ if !(res.Addable != 0) {
+ if n.Ullman > res.Ullman {
+ regalloc(&n1, n.Type, res)
+ cgen(n, &n1)
+ if n1.Ullman > res.Ullman {
+ gc.Dump("n1", &n1)
+ gc.Dump("res", res)
+ gc.Fatal("loop in cgen")
+ }
+
+ cgen(&n1, res)
+ regfree(&n1)
+ goto ret
+ }
+
+ if res.Ullman >= gc.UINF {
+ goto gen
+ }
+
+ if gc.Complexop(n, res) != 0 {
+ gc.Complexgen(n, res)
+ goto ret
+ }
+
+ f = 1 // gen thru register
+ switch n.Op {
+ case gc.OLITERAL:
+ if gc.Smallintconst(n) != 0 {
+ f = 0
+ }
+
+ case gc.OREGISTER:
+ f = 0
+ }
+
+ if !(gc.Iscomplex[n.Type.Etype] != 0) {
+ a = optoas(gc.OAS, res.Type)
+ if sudoaddable(a, res, &addr) != 0 {
+ if f != 0 {
+ regalloc(&n2, res.Type, nil)
+ cgen(n, &n2)
+ p1 = gins(a, &n2, nil)
+ regfree(&n2)
+ } else {
+ p1 = gins(a, n, nil)
+ }
+ p1.To = addr
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ sudoclean()
+ goto ret
+ }
+ }
+
+ gen:
+ igen(res, &n1, nil)
+ cgen(n, &n1)
+ regfree(&n1)
+ goto ret
+ }
+
+ // update addressability for string, slice
+ // can't do in walk because n->left->addable
+ // changes if n->left is an escaping local variable.
+ switch n.Op {
+ case gc.OSPTR,
+ gc.OLEN:
+ if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OCAP:
+ if gc.Isslice(n.Left.Type) != 0 {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OITAB:
+ n.Addable = n.Left.Addable
+ }
+
+ if gc.Complexop(n, res) != 0 {
+ gc.Complexgen(n, res)
+ goto ret
+ }
+
+ if n.Addable != 0 {
+ gmove(n, res)
+ goto ret
+ }
+
+ nl = n.Left
+ nr = n.Right
+
+ if nl != nil && nl.Ullman >= gc.UINF {
+ if nr != nil && nr.Ullman >= gc.UINF {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ n2 = *n
+ n2.Left = &n1
+ cgen(&n2, res)
+ goto ret
+ }
+ }
+
+ if !(gc.Iscomplex[n.Type.Etype] != 0) {
+ a = optoas(gc.OAS, n.Type)
+ if sudoaddable(a, n, &addr) != 0 {
+ if res.Op == gc.OREGISTER {
+ p1 = gins(a, nil, res)
+ p1.From = addr
+ } else {
+ regalloc(&n2, n.Type, nil)
+ p1 = gins(a, nil, &n2)
+ p1.From = addr
+ gins(a, &n2, res)
+ regfree(&n2)
+ }
+
+ sudoclean()
+ goto ret
+ }
+ }
+
+ switch n.Op {
+ default:
+ gc.Dump("cgen", n)
+ gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ // these call bgen to get a bool value
+ case gc.OOROR,
+ gc.OANDAND,
+ gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OLE,
+ gc.OGE,
+ gc.OGT,
+ gc.ONOT:
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+
+ p2 = gc.Pc
+ gmove(gc.Nodbool(1), res)
+ p3 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n, true, 0, p2)
+ gmove(gc.Nodbool(0), res)
+ gc.Patch(p3, gc.Pc)
+ goto ret
+
+ case gc.OPLUS:
+ cgen(nl, res)
+ goto ret
+
+ // unary
+ case gc.OCOM:
+ a = optoas(gc.OXOR, nl.Type)
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ gc.Nodconst(&n2, nl.Type, -1)
+ gins(a, &n2, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ goto ret
+
+ case gc.OMINUS:
+ if gc.Isfloat[nl.Type.Etype] != 0 {
+ nr = gc.Nodintconst(-1)
+ gc.Convlit(&nr, n.Type)
+ a = optoas(gc.OMUL, nl.Type)
+ goto sbop
+ }
+
+ a = optoas(int(n.Op), nl.Type)
+ goto uop
+
+ // symmetric binary
+ case gc.OAND,
+ gc.OOR,
+ gc.OXOR,
+ gc.OADD,
+ gc.OMUL:
+ a = optoas(int(n.Op), nl.Type)
+
+ if a == x86.AIMULB {
+ cgen_bmul(int(n.Op), nl, nr, res)
+ break
+ }
+
+ goto sbop
+
+ // asymmetric binary
+ case gc.OSUB:
+ a = optoas(int(n.Op), nl.Type)
+
+ goto abop
+
+ case gc.OHMUL:
+ cgen_hmul(nl, nr, res)
+
+ case gc.OCONV:
+ if n.Type.Width > nl.Type.Width {
+ // If loading from memory, do conversion during load,
+ // so as to avoid use of 8-bit register in, say, int(*byteptr).
+ switch nl.Op {
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OIND,
+ gc.ONAME:
+ igen(nl, &n1, res)
+ regalloc(&n2, n.Type, res)
+ gmove(&n1, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ regfree(&n1)
+ goto ret
+ }
+ }
+
+ regalloc(&n1, nl.Type, res)
+ regalloc(&n2, n.Type, &n1)
+ cgen(nl, &n1)
+
+ // if we do the conversion n1 -> n2 here
+ // reusing the register, then gmove won't
+ // have to allocate its own register.
+ gmove(&n1, &n2)
+
+ gmove(&n2, res)
+ regfree(&n2)
+ regfree(&n1)
+
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OIND,
+ gc.ONAME: // PHEAP or PPARAMREF var
+ igen(n, &n1, res)
+
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // interface table is first word of interface value
+ case gc.OITAB:
+ igen(nl, &n1, res)
+
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // pointer is the first word of string or slice.
+ case gc.OSPTR:
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+ p1 = gins(x86.ALEAQ, nil, &n1)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ igen(nl, &n1, res)
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ case gc.OLEN:
+ if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ // map and chan have len in the first int-sized word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.Simtype[gc.TINT]]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+ // both slice and string have len one pointer into the struct.
+ // a zero pointer means zero length
+ igen(nl, &n1, res)
+
+ n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ n1.Xoffset += int64(gc.Array_nel)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OCAP:
+ if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ // chan has cap in the second int-sized word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Xoffset = int64(gc.Widthint)
+ n2.Type = gc.Types[gc.Simtype[gc.TINT]]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isslice(nl.Type) != 0 {
+ igen(nl, &n1, res)
+ n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ n1.Xoffset += int64(gc.Array_cap)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OADDR:
+ if n.Bounded != 0 { // let race detector avoid nil checks
+ gc.Disable_checknil++
+ }
+ agen(nl, res)
+ if n.Bounded != 0 {
+ gc.Disable_checknil--
+ }
+
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+ cgen_callret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_callret(n, res)
+
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+ cgen_callret(n, res)
+
+ case gc.OMOD,
+ gc.ODIV:
+ if gc.Isfloat[n.Type.Etype] != 0 {
+ a = optoas(int(n.Op), nl.Type)
+ goto abop
+ }
+
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+ cgen_div(int(n.Op), &n1, nr, res)
+ regfree(&n1)
+ } else {
+ if !(gc.Smallintconst(nr) != 0) {
+ regalloc(&n2, nr.Type, res)
+ cgen(nr, &n2)
+ } else {
+ n2 = *nr
+ }
+
+ cgen_div(int(n.Op), nl, &n2, res)
+ if n2.Op != gc.OLITERAL {
+ regfree(&n2)
+ }
+ }
+
+ case gc.OLSH,
+ gc.ORSH,
+ gc.OLROT:
+ cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+ }
+
+ goto ret
+
+ /*
+ * put simplest on right - we'll generate into left
+ * and then adjust it using the computation of right.
+ * constants and variables have the same ullman
+ * count, so look for constants specially.
+ *
+ * an integer constant we can use as an immediate
+ * is simpler than a variable - we can use the immediate
+ * in the adjustment instruction directly - so it goes
+ * on the right.
+ *
+ * other constants, like big integers or floating point
+ * constants, require a mov into a register, so those
+ * might as well go on the left, so we can reuse that
+ * register for the computation.
+ */
+sbop: // symmetric binary
+ if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) != 0 || (nr.Op == gc.OLITERAL && !(gc.Smallintconst(nr) != 0)))) {
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+abop: // asymmetric binary
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+
+ /*
+ * This generates smaller code - it avoids a MOV - but it's
+ * easily 10% slower due to not being able to
+ * optimize/manipulate the move.
+ * To see, run: go test -bench . crypto/md5
+ * with and without.
+ *
+ if(sudoaddable(a, nr, &addr)) {
+ p1 = gins(a, N, &n1);
+ p1->from = addr;
+ gmove(&n1, res);
+ sudoclean();
+ regfree(&n1);
+ goto ret;
+ }
+ *
+ */
+ if gc.Smallintconst(nr) != 0 {
+ n2 = *nr
+ } else {
+ regalloc(&n2, nr.Type, nil)
+ cgen(nr, &n2)
+ }
+ } else {
+ if gc.Smallintconst(nr) != 0 {
+ n2 = *nr
+ } else {
+ regalloc(&n2, nr.Type, res)
+ cgen(nr, &n2)
+ }
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ }
+
+ gins(a, &n2, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ if n2.Op != gc.OLITERAL {
+ regfree(&n2)
+ }
+ goto ret
+
+uop: // unary
+ regalloc(&n1, nl.Type, res)
+
+ cgen(nl, &n1)
+ gins(a, nil, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ goto ret
+
+ret:
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ * a = n
+ * The caller must call regfree(a).
+ */
+func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("cgenr-n", n)
+ }
+
+ if gc.Isfat(n.Type) != 0 {
+ gc.Fatal("cgenr on fat node")
+ }
+
+ if n.Addable != 0 {
+ regalloc(a, n.Type, res)
+ gmove(n, a)
+ return
+ }
+
+ switch n.Op {
+ case gc.ONAME,
+ gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ igen(n, &n1, res)
+ regalloc(a, gc.Types[gc.Tptr], &n1)
+ gmove(&n1, a)
+ regfree(&n1)
+
+ default:
+ regalloc(a, n.Type, res)
+ cgen(n, a)
+ }
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ * a = &n
+ * The caller must call regfree(a).
+ * The generated code checks that the result is not nil.
+ */
+func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var n5 gc.Node
+ var tmp gc.Node
+ var tmp2 gc.Node
+ var nlen gc.Node
+ var p1 *obj.Prog
+ var t *gc.Type
+ var w uint64
+ var v uint64
+ var freelen int
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nagenr-n", n)
+ }
+
+ nl = n.Left
+ nr = n.Right
+
+ switch n.Op {
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ igen(n, &n1, res)
+ regalloc(a, gc.Types[gc.Tptr], &n1)
+ agen(&n1, a)
+ regfree(&n1)
+
+ case gc.OIND:
+ cgenr(n.Left, a, res)
+ gc.Cgen_checknil(a)
+
+ case gc.OINDEX:
+ freelen = 0
+ w = uint64(n.Type.Width)
+
+ // Generate the non-addressable child first.
+ if nr.Addable != 0 {
+ goto irad
+ }
+ if nl.Addable != 0 {
+ cgenr(nr, &n1, nil)
+ if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ if gc.Isfixedarray(nl.Type) != 0 {
+ agenr(nl, &n3, res)
+ } else {
+ igen(nl, &nlen, res)
+ freelen = 1
+ nlen.Type = gc.Types[gc.Tptr]
+ nlen.Xoffset += int64(gc.Array_array)
+ regalloc(&n3, gc.Types[gc.Tptr], res)
+ gmove(&nlen, &n3)
+ nlen.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ }
+ }
+
+ goto index
+ }
+
+ gc.Tempname(&tmp, nr.Type)
+ cgen(nr, &tmp)
+ nr = &tmp
+
+ irad:
+ if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ if gc.Isfixedarray(nl.Type) != 0 {
+ agenr(nl, &n3, res)
+ } else {
+ if !(nl.Addable != 0) {
+ // igen will need an addressable node.
+ gc.Tempname(&tmp2, nl.Type)
+
+ cgen(nl, &tmp2)
+ nl = &tmp2
+ }
+
+ igen(nl, &nlen, res)
+ freelen = 1
+ nlen.Type = gc.Types[gc.Tptr]
+ nlen.Xoffset += int64(gc.Array_array)
+ regalloc(&n3, gc.Types[gc.Tptr], res)
+ gmove(&nlen, &n3)
+ nlen.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ }
+ }
+
+ if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ cgenr(nr, &n1, nil)
+ }
+
+ goto index
+
+ // &a is in &n3 (allocated in res)
+ // i is in &n1 (if not constant)
+ // len(a) is in nlen (if needed)
+ // w is width
+
+ // constant index
+ index:
+ if gc.Isconst(nr, gc.CTINT) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ gc.Fatal("constant string constant index") // front end should handle
+ }
+ v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ gc.Nodconst(&n2, gc.Types[gc.Simtype[gc.TUINT]], int64(v))
+ if gc.Smallintconst(nr) != 0 {
+ gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &n2)
+ } else {
+ regalloc(&tmp, gc.Types[gc.Simtype[gc.TUINT]], nil)
+ gmove(&n2, &tmp)
+ gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &tmp)
+ regfree(&tmp)
+ }
+
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
+ ginscall(gc.Panicindex, -1)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ regfree(&nlen)
+ }
+
+ if v*w != 0 {
+ ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*w), &n3)
+ }
+ *a = n3
+ break
+ }
+
+ // type of the index
+ t = gc.Types[gc.TUINT64]
+
+ if gc.Issigned[n1.Type.Etype] != 0 {
+ t = gc.Types[gc.TINT64]
+ }
+
+ regalloc(&n2, t, &n1) // i
+ gmove(&n1, &n2)
+ regfree(&n1)
+
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ // check bounds
+ t = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if gc.Is64(nr.Type) != 0 {
+ t = gc.Types[gc.TUINT64]
+ }
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
+ } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ if gc.Is64(nr.Type) != 0 {
+ regalloc(&n5, t, nil)
+ gmove(&nlen, &n5)
+ regfree(&nlen)
+ nlen = n5
+ }
+ } else {
+ gc.Nodconst(&nlen, t, nl.Type.Bound)
+ if !(gc.Smallintconst(&nlen) != 0) {
+ regalloc(&n5, t, nil)
+ gmove(&nlen, &n5)
+ nlen = n5
+ freelen = 1
+ }
+ }
+
+ gins(optoas(gc.OCMP, t), &n2, &nlen)
+ p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
+ ginscall(gc.Panicindex, -1)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ regalloc(&n3, gc.Types[gc.Tptr], res)
+ p1 = gins(x86.ALEAQ, nil, &n3)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ gins(x86.AADDQ, &n2, &n3)
+ goto indexdone
+ }
+
+ if w == 0 {
+ // nothing to do
+ } else if w == 1 || w == 2 || w == 4 || w == 8 {
+ p1 = gins(x86.ALEAQ, &n2, &n3)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Scale = int8(w)
+ p1.From.Index = p1.From.Reg
+ p1.From.Reg = p1.To.Reg
+ } else {
+ ginscon(optoas(gc.OMUL, t), int64(w), &n2)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ }
+
+ indexdone:
+ *a = n3
+ regfree(&n2)
+ if freelen != 0 {
+ regfree(&nlen)
+ }
+
+ default:
+ regalloc(a, gc.Types[gc.Tptr], res)
+ agen(n, a)
+ }
+}
+
+/*
+ * generate:
+ * res = &n;
+ * The generated code checks that the result is not nil.
+ */
+func agen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nagen-res", res)
+ gc.Dump("agen-r", n)
+ }
+
+ if n == nil || n.Type == nil {
+ return
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+ // Use of a nil interface or nil slice.
+ // Create a temporary we can take the address of and read.
+ // The generated code is just going to panic, so it need not
+ // be terribly efficient. See issue 3670.
+ gc.Tempname(&n1, n.Type)
+
+ gc.Gvardef(&n1)
+ clearfat(&n1)
+ regalloc(&n2, gc.Types[gc.Tptr], res)
+ gins(x86.ALEAQ, &n1, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ goto ret
+ }
+
+ if n.Addable != 0 {
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+ gins(x86.ALEAQ, n, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ goto ret
+ }
+
+ nl = n.Left
+
+ switch n.Op {
+ default:
+ gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+ cgen_aret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_aret(n, res)
+
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+ cgen_aret(n, res)
+
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ agen(&n1, res)
+
+ case gc.OEFACE:
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ agen(&n1, res)
+
+ case gc.OINDEX:
+ agenr(n, &n1, res)
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // should only get here with names in this func.
+ case gc.ONAME:
+ if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
+ gc.Dump("bad agen", n)
+ gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
+ }
+
+ // should only get here for heap vars or paramref
+ if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+ gc.Dump("bad agen", n)
+ gc.Fatal("agen: bad ONAME class %#x", n.Class)
+ }
+
+ cgen(n.Heapaddr, res)
+ if n.Xoffset != 0 {
+ ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+ }
+
+ case gc.OIND:
+ cgen(nl, res)
+ gc.Cgen_checknil(res)
+
+ case gc.ODOT:
+ agen(nl, res)
+ if n.Xoffset != 0 {
+ ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+ }
+
+ case gc.ODOTPTR:
+ cgen(nl, res)
+ gc.Cgen_checknil(res)
+ if n.Xoffset != 0 {
+ ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+ }
+ }
+
+ret:
+}
+
+/*
+ * generate:
+ * newreg = &n;
+ * res = newreg
+ *
+ * on exit, a has been changed to be *newreg.
+ * caller must regfree(a).
+ * The generated code checks that the result is not *nil.
+ */
+func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var fp *gc.Type
+ var flist gc.Iter
+ var n1 gc.Node
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nigen-n", n)
+ }
+
+ switch n.Op {
+ case gc.ONAME:
+ if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
+ break
+ }
+ *a = *n
+ return
+
+ // Increase the refcount of the register so that igen's caller
+ // has to call regfree.
+ case gc.OINDREG:
+ if n.Val.U.Reg != x86.REG_SP {
+ reg[n.Val.U.Reg]++
+ }
+ *a = *n
+ return
+
+ case gc.ODOT:
+ igen(n.Left, a, res)
+ a.Xoffset += n.Xoffset
+ a.Type = n.Type
+ fixlargeoffset(a)
+ return
+
+ case gc.ODOTPTR:
+ cgenr(n.Left, a, res)
+ gc.Cgen_checknil(a)
+ a.Op = gc.OINDREG
+ a.Xoffset += n.Xoffset
+ a.Type = n.Type
+ fixlargeoffset(a)
+ return
+
+ case gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ switch n.Op {
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, nil, 0)
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+ *a = gc.Node{}
+ a.Op = gc.OINDREG
+ a.Val.U.Reg = x86.REG_SP
+ a.Addable = 1
+ a.Xoffset = fp.Width
+ a.Type = n.Type
+ return
+
+ // Index of fixed-size array by constant can
+ // put the offset in the addressing.
+ // Could do the same for slice except that we need
+ // to use the real index for the bounds checking.
+ case gc.OINDEX:
+ if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
+ if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ // Compute &a.
+ if !(gc.Isptr[n.Left.Type.Etype] != 0) {
+ igen(n.Left, a, res)
+ } else {
+ igen(n.Left, &n1, res)
+ gc.Cgen_checknil(&n1)
+ regalloc(a, gc.Types[gc.Tptr], res)
+ gmove(&n1, a)
+ regfree(&n1)
+ a.Op = gc.OINDREG
+ }
+
+ // Compute &a[i] as &a + i*width.
+ a.Type = n.Type
+
+ a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
+ fixlargeoffset(a)
+ return
+ }
+ }
+ }
+
+ agenr(n, a, res)
+ a.Op = gc.OINDREG
+ a.Type = n.Type
+}
+
+/*
+ * generate:
+ * if(n == true) goto to;
+ */
+func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
+ var et int
+ var a int
+ var nl *gc.Node
+ var nr *gc.Node
+ var l *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var tmp gc.Node
+ var ll *gc.NodeList
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nbgen", n)
+ }
+
+ if n == nil {
+ n = gc.Nodbool(1)
+ }
+
+ if n.Ninit != nil {
+ gc.Genlist(n.Ninit)
+ }
+
+ if n.Type == nil {
+ gc.Convlit(&n, gc.Types[gc.TBOOL])
+ if n.Type == nil {
+ goto ret
+ }
+ }
+
+ et = int(n.Type.Etype)
+ if et != gc.TBOOL {
+ gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
+ gc.Patch(gins(obj.AEND, nil, nil), to)
+ goto ret
+ }
+
+ nr = nil
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ if n.Ninit != nil {
+ gc.Genlist(n.Ninit)
+ }
+ }
+
+ switch n.Op {
+ default:
+ goto def
+
+ // need to ask if it is bool?
+ case gc.OLITERAL:
+ if !true_ == !(n.Val.U.Bval != 0) {
+ gc.Patch(gc.Gbranch(obj.AJMP, nil, likely), to)
+ }
+ goto ret
+
+ case gc.ONAME:
+ if n.Addable == 0 {
+ goto def
+ }
+ gc.Nodconst(&n1, n.Type, 0)
+ gins(optoas(gc.OCMP, n.Type), n, &n1)
+ a = x86.AJNE
+ if !true_ {
+ a = x86.AJEQ
+ }
+ gc.Patch(gc.Gbranch(a, n.Type, likely), to)
+ goto ret
+
+ case gc.OANDAND,
+ gc.OOROR:
+ if (n.Op == gc.OANDAND) == true_ {
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n.Left, !true_, -likely, p2)
+ bgen(n.Right, !true_, -likely, p2)
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, to)
+ gc.Patch(p2, gc.Pc)
+ } else {
+ bgen(n.Left, true_, likely, to)
+ bgen(n.Right, true_, likely, to)
+ }
+
+ goto ret
+
+ case gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OGT,
+ gc.OLE,
+ gc.OGE:
+ nr = n.Right
+ if nr == nil || nr.Type == nil {
+ goto ret
+ }
+ fallthrough
+
+ case gc.ONOT: // unary
+ nl = n.Left
+
+ if nl == nil || nl.Type == nil {
+ goto ret
+ }
+ }
+
+ switch n.Op {
+ case gc.ONOT:
+ bgen(nl, !true_, likely, to)
+ goto ret
+
+ case gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OGT,
+ gc.OLE,
+ gc.OGE:
+ a = int(n.Op)
+ if !true_ {
+ if gc.Isfloat[nr.Type.Etype] != 0 {
+ // brcom is not valid on floats when NaN is involved.
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ ll = n.Ninit // avoid re-genning ninit
+ n.Ninit = nil
+ bgen(n, true, -likely, p2)
+ n.Ninit = ll
+ gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+ gc.Patch(p2, gc.Pc)
+ goto ret
+ }
+
+ a = gc.Brcom(a)
+ true_ = !true_
+ }
+
+ // make simplest on right
+ if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
+ a = gc.Brrev(a)
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+ if gc.Isslice(nl.Type) != 0 {
+ // front end should only leave cmp to literal nil
+ if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+ gc.Yyerror("illegal slice comparison")
+ break
+ }
+
+ a = optoas(a, gc.Types[gc.Tptr])
+ igen(nl, &n1, nil)
+ n1.Xoffset += int64(gc.Array_array)
+ n1.Type = gc.Types[gc.Tptr]
+ gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
+ gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isinter(nl.Type) != 0 {
+ // front end should only leave cmp to literal nil
+ if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+ gc.Yyerror("illegal interface comparison")
+ break
+ }
+
+ a = optoas(a, gc.Types[gc.Tptr])
+ igen(nl, &n1, nil)
+ n1.Type = gc.Types[gc.Tptr]
+ gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
+ gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Iscomplex[nl.Type.Etype] != 0 {
+ gc.Complexbool(a, nl, nr, true_, likely, to)
+ break
+ }
+
+ if nr.Ullman >= gc.UINF {
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+
+ gc.Tempname(&tmp, nl.Type)
+ gmove(&n1, &tmp)
+ regfree(&n1)
+
+ regalloc(&n2, nr.Type, nil)
+ cgen(nr, &n2)
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(&tmp, &n1)
+
+ goto cmp
+ }
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+
+ if gc.Smallintconst(nr) != 0 {
+ gins(optoas(gc.OCMP, nr.Type), &n1, nr)
+ gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+ regfree(&n1)
+ break
+ }
+
+ regalloc(&n2, nr.Type, nil)
+ cgen(nr, &n2)
+
+ // only < and <= work right with NaN; reverse if needed
+ cmp:
+ l = &n1
+
+ r = &n2
+ if gc.Isfloat[nl.Type.Etype] != 0 && (a == gc.OGT || a == gc.OGE) {
+ l = &n2
+ r = &n1
+ a = gc.Brrev(a)
+ }
+
+ gins(optoas(gc.OCMP, nr.Type), l, r)
+
+ if gc.Isfloat[nr.Type.Etype] != 0 && (n.Op == gc.OEQ || n.Op == gc.ONE) {
+ if n.Op == gc.OEQ {
+ // neither NE nor P
+ p1 = gc.Gbranch(x86.AJNE, nil, -likely)
+
+ p2 = gc.Gbranch(x86.AJPS, nil, -likely)
+ gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+ gc.Patch(p1, gc.Pc)
+ gc.Patch(p2, gc.Pc)
+ } else {
+ // either NE or P
+ gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)
+
+ gc.Patch(gc.Gbranch(x86.AJPS, nil, likely), to)
+ }
+ } else {
+ gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+ }
+ regfree(&n1)
+ regfree(&n2)
+ }
+
+ goto ret
+
+def:
+ regalloc(&n1, n.Type, nil)
+ cgen(n, &n1)
+ gc.Nodconst(&n2, n.Type, 0)
+ gins(optoas(gc.OCMP, n.Type), &n1, &n2)
+ a = x86.AJNE
+ if !true_ {
+ a = x86.AJEQ
+ }
+ gc.Patch(gc.Gbranch(a, n.Type, likely), to)
+ regfree(&n1)
+ goto ret
+
+ret:
+}
+
+/*
+ * n is on stack, either local variable
+ * or return value from function call.
+ * return n's offset from SP.
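+ * The sentinels mean: -1000, n is not known to be on the stack;
+ * 1000, n is on the stack but at an unknown offset.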
+ */
+func stkof(n *gc.Node) int64 {
+ var t *gc.Type
+ var flist gc.Iter
+ var off int64
+
+ switch n.Op {
+ case gc.OINDREG:
+ return n.Xoffset
+
+ case gc.ODOT:
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ break
+ }
+ off = stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ return off + n.Xoffset
+
+ case gc.OINDEX:
+ t = n.Left.Type
+ if !(gc.Isfixedarray(t) != 0) {
+ break
+ }
+ off = stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
+ }
+ return 1000
+
+ case gc.OCALLMETH,
+ gc.OCALLINTER,
+ gc.OCALLFUNC:
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+
+ t = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if t != nil {
+ return t.Width
+ }
+ }
+
+ // botch - probably failing to recognize address
+ // arithmetic on the above. eg INDEX and DOT
+ return -1000
+}
+
+/*
+ * block copy:
+ * memmove(&ns, &n, w);
+ */
+func sgen(n *gc.Node, ns *gc.Node, w int64) {
+ var nodl gc.Node
+ var nodr gc.Node
+ var nodsi gc.Node
+ var noddi gc.Node
+ var cx gc.Node
+ var oldcx gc.Node
+ var tmp gc.Node
+ var c int64
+ var q int64
+ var odst int64
+ var osrc int64
+ var l *gc.NodeList
+ var p *obj.Prog
+
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("\nsgen w=%d\n", w)
+ gc.Dump("r", n)
+ gc.Dump("res", ns)
+ }
+
+ if n.Ullman >= gc.UINF && ns.Ullman >= gc.UINF {
+ gc.Fatal("sgen UINF")
+ }
+
+ if w < 0 {
+ gc.Fatal("sgen copy %d", w)
+ }
+
+ // If copying .args, that's all the results, so record definition sites
+ // for them for the liveness analysis.
+ if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
+ for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ if l.N.Class == gc.PPARAMOUT {
+ gc.Gvardef(l.N)
+ }
+ }
+ }
+
+ // Avoid taking the address for simple enough types.
+ if componentgen(n, ns) != 0 {
+ return
+ }
+
+ if w == 0 {
+ // evaluate side effects only
+ regalloc(&nodr, gc.Types[gc.Tptr], nil)
+
+ agen(ns, &nodr)
+ agen(n, &nodr)
+ regfree(&nodr)
+ return
+ }
+
+ // offset on the stack
+ osrc = stkof(n)
+
+ odst = stkof(ns)
+
+ if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
+ // osrc and odst both on stack, and at least one is in
+ // an unknown position. Could generate code to test
+ // for forward/backward copy, but instead just copy
+ // to a temporary location first.
+ gc.Tempname(&tmp, n.Type)
+
+ sgen(n, &tmp, w)
+ sgen(&tmp, ns, w)
+ return
+ }
+
+ gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
+ gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)
+
+ if n.Ullman >= ns.Ullman {
+ agenr(n, &nodr, &nodsi)
+ if ns.Op == gc.ONAME {
+ gc.Gvardef(ns)
+ }
+ agenr(ns, &nodl, &noddi)
+ } else {
+ if ns.Op == gc.ONAME {
+ gc.Gvardef(ns)
+ }
+ agenr(ns, &nodl, &noddi)
+ agenr(n, &nodr, &nodsi)
+ }
+
+ if nodl.Val.U.Reg != x86.REG_DI {
+ gmove(&nodl, &noddi)
+ }
+ if nodr.Val.U.Reg != x86.REG_SI {
+ gmove(&nodr, &nodsi)
+ }
+ regfree(&nodl)
+ regfree(&nodr)
+
+ c = w % 8 // bytes
+ q = w / 8 // quads
+
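+ // CX doubles as the REP count register and as scratch here;
+ // save the caller's value and restore it via restx below.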
+ savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])
+
+ // if we are copying forward on the stack and
+ // the src and dst overlap, then reverse direction
+ if osrc < odst && odst < osrc+w {
+ // reverse direction
+ gins(x86.ASTD, nil, nil) // set direction flag
+ if c > 0 {
+ gconreg(addptr, w-1, x86.REG_SI)
+ gconreg(addptr, w-1, x86.REG_DI)
+
+ gconreg(movptr, c, x86.REG_CX)
+ gins(x86.AREP, nil, nil) // repeat
+ gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
+ }
+
+ if q > 0 {
+ if c > 0 {
+ gconreg(addptr, -7, x86.REG_SI)
+ gconreg(addptr, -7, x86.REG_DI)
+ } else {
+ gconreg(addptr, w-8, x86.REG_SI)
+ gconreg(addptr, w-8, x86.REG_DI)
+ }
+
+ gconreg(movptr, q, x86.REG_CX)
+ gins(x86.AREP, nil, nil) // repeat
+ gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
+ }
+
+ // we leave with the flag clear
+ gins(x86.ACLD, nil, nil)
+ } else {
+ // normal direction
+ if q > 128 || (gc.Nacl && q >= 4) {
+ gconreg(movptr, q, x86.REG_CX)
+ gins(x86.AREP, nil, nil) // repeat
+ gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
+ } else if q >= 4 {
+ p = gins(obj.ADUFFCOPY, nil, nil)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
+
+ // 14 and 128 = magic constants: see ../../runtime/asm_amd64.s
+ p.To.Offset = 14 * (128 - q)
+ } else if !gc.Nacl && c == 0 {
+ // We don't need the MOVSQ side-effect of updating SI and DI,
+ // and issuing a sequence of MOVQs directly is faster.
+ nodsi.Op = gc.OINDREG
+
+ noddi.Op = gc.OINDREG
+ for q > 0 {
+ gmove(&nodsi, &cx) // MOVQ x+(SI),CX
+ gmove(&cx, &noddi) // MOVQ CX,x+(DI)
+ nodsi.Xoffset += 8
+ noddi.Xoffset += 8
+ q--
+ }
+ } else {
+ for q > 0 {
+ gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
+ q--
+ }
+ }
+
+ // copy the remaining c bytes
+ if w < 4 || c <= 1 || (odst < osrc && osrc < odst+w) {
+ for c > 0 {
+ gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
+ c--
+ }
+ } else if w < 8 || c <= 4 {
+ nodsi.Op = gc.OINDREG
+ noddi.Op = gc.OINDREG
+ cx.Type = gc.Types[gc.TINT32]
+ nodsi.Type = gc.Types[gc.TINT32]
+ noddi.Type = gc.Types[gc.TINT32]
+ if c > 4 {
+ nodsi.Xoffset = 0
+ noddi.Xoffset = 0
+ gmove(&nodsi, &cx)
+ gmove(&cx, &noddi)
+ }
+
+ nodsi.Xoffset = c - 4
+ noddi.Xoffset = c - 4
+ gmove(&nodsi, &cx)
+ gmove(&cx, &noddi)
+ } else {
+ nodsi.Op = gc.OINDREG
+ noddi.Op = gc.OINDREG
+ cx.Type = gc.Types[gc.TINT64]
+ nodsi.Type = gc.Types[gc.TINT64]
+ noddi.Type = gc.Types[gc.TINT64]
+ nodsi.Xoffset = c - 8
+ noddi.Xoffset = c - 8
+ gmove(&nodsi, &cx)
+ gmove(&cx, &noddi)
+ }
+ }
+
+ restx(&cx, &oldcx)
+}
+
+func cadable(n *gc.Node) int {
+ if !(n.Addable != 0) {
+ // don't know how it happens,
+ // but it does
+ return 0
+ }
+
+ switch n.Op {
+ case gc.ONAME:
+ return 1
+ }
+
+ return 0
+}
+
+/*
+ * copy a composite value by moving its individual components.
+ * Slices, strings and interfaces are supported.
+ * Small structs or arrays with elements of basic type are
+ * also supported.
+ * nr is nil when assigning a zero value.
+ * return 1 if can do, 0 if can't.
+ */
+func componentgen(nr *gc.Node, nl *gc.Node) int {
+ var nodl gc.Node
+ var nodr gc.Node
+ var tmp gc.Node
+ var t *gc.Type
+ var freel int
+ var freer int
+ var fldcount int64
+ var loffset int64
+ var roffset int64
+
+ freel = 0
+ freer = 0
+
+ switch nl.Type.Etype {
+ default:
+ goto no
+
+ case gc.TARRAY:
+ t = nl.Type
+
+ // Slices are ok.
+ if gc.Isslice(t) != 0 {
+ break
+ }
+
+ // Small arrays are ok.
+ if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+ break
+ }
+
+ goto no
+
+ // Small structs with non-fat types are ok.
+ // Zero-sized structs are treated separately elsewhere.
+ case gc.TSTRUCT:
+ fldcount = 0
+
+ for t = nl.Type.Type; t != nil; t = t.Down {
+ if gc.Isfat(t.Type) != 0 {
+ goto no
+ }
+ if t.Etype != gc.TFIELD {
+ gc.Fatal("componentgen: not a TFIELD: %v", gc.Tconv(t, obj.FmtLong))
+ }
+ fldcount++
+ }
+
+ if fldcount == 0 || fldcount > 4 {
+ goto no
+ }
+
+ case gc.TSTRING,
+ gc.TINTER:
+ break
+ }
+
+ nodl = *nl
+ if !(cadable(nl) != 0) {
+ if nr != nil && !(cadable(nr) != 0) {
+ goto no
+ }
+ igen(nl, &nodl, nil)
+ freel = 1
+ }
+
+ if nr != nil {
+ nodr = *nr
+ if !(cadable(nr) != 0) {
+ igen(nr, &nodr, nil)
+ freer = 1
+ }
+ } else {
+ // When zeroing, prepare a register containing zero.
+ gc.Nodconst(&tmp, nl.Type, 0)
+
+ regalloc(&nodr, gc.Types[gc.TUINT], nil)
+ gmove(&tmp, &nodr)
+ freer = 1
+ }
+
+ // nl and nr are 'cadable' which basically means they are names (variables) now.
+ // If they are the same variable, don't generate any code, because the
+ // VARDEF we generate will mark the old value as dead incorrectly.
+ // (And also the assignments are useless.)
+ if nr != nil && nl.Op == gc.ONAME && nr.Op == gc.ONAME && nl == nr {
+ goto yes
+ }
+
+ switch nl.Type.Etype {
+ // componentgen for arrays.
+ case gc.TARRAY:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ t = nl.Type
+ if !(gc.Isslice(t) != 0) {
+ nodl.Type = t.Type
+ nodr.Type = nodl.Type
+ for fldcount = 0; fldcount < t.Bound; fldcount++ {
+ if nr == nil {
+ gc.Clearslim(&nodl)
+ } else {
+ gmove(&nodr, &nodl)
+ }
+ nodl.Xoffset += t.Type.Width
+ nodr.Xoffset += t.Type.Width
+ }
+
+ goto yes
+ }
+
+ // componentgen for slices.
+ nodl.Xoffset += int64(gc.Array_array)
+
+ nodl.Type = gc.Ptrto(nl.Type.Type)
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TSTRING:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ nodl.Xoffset += int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TINTER:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ nodl.Xoffset += int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TSTRUCT:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ loffset = nodl.Xoffset
+ roffset = nodr.Xoffset
+
+ // funarg structs may not begin at offset zero.
+ if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
+ loffset -= nl.Type.Type.Width
+ }
+ if nr != nil && nr.Type.Etype == gc.TSTRUCT && nr.Type.Funarg != 0 && nr.Type.Type != nil {
+ roffset -= nr.Type.Type.Width
+ }
+
+ for t = nl.Type.Type; t != nil; t = t.Down {
+ nodl.Xoffset = loffset + t.Width
+ nodl.Type = t.Type
+
+ if nr == nil {
+ gc.Clearslim(&nodl)
+ } else {
+ nodr.Xoffset = roffset + t.Width
+ nodr.Type = nodl.Type
+ gmove(&nodr, &nodl)
+ }
+ }
+
+ goto yes
+ }
+
+no:
+ if freer != 0 {
+ regfree(&nodr)
+ }
+ if freel != 0 {
+ regfree(&nodl)
+ }
+ return 0
+
+yes:
+ if freer != 0 {
+ regfree(&nodr)
+ }
+ if freel != 0 {
+ regfree(&nodl)
+ }
+ return 1
+}
diff --git a/src/cmd/new6g/galign.go b/src/cmd/new6g/galign.go
new file mode 100644
index 0000000000..d5d1e9ad59
--- /dev/null
+++ b/src/cmd/new6g/galign.go
@@ -0,0 +1,109 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+import "cmd/internal/gc"
+
+var thechar int = '6'
+
+var thestring string = "amd64"
+
+var thelinkarch *obj.LinkArch = &x86.Linkamd64
+
+func linkarchinit() {
+ if obj.Getgoarch() == "amd64p32" {
+ thelinkarch = &x86.Linkamd64p32
+ gc.Thearch.Thelinkarch = thelinkarch
+ thestring = "amd64p32"
+ gc.Thearch.Thestring = "amd64p32"
+ }
+}
+
+var MAXWIDTH int64 = 1 << 50
+
+var addptr int = x86.AADDQ
+
+var movptr int = x86.AMOVQ
+
+var leaptr int = x86.ALEAQ
+
+var cmpptr int = x86.ACMPQ
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, float, and uintptr
+ */
+var typedefs = []gc.Typedef{
+ gc.Typedef{"int", gc.TINT, gc.TINT64},
+ gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
+ gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
+}
+
+func betypeinit() {
+ gc.Widthptr = 8
+ gc.Widthint = 8
+ gc.Widthreg = 8
+ if obj.Getgoarch() == "amd64p32" {
+ gc.Widthptr = 4
+ gc.Widthint = 4
+ addptr = x86.AADDL
+ movptr = x86.AMOVL
+ leaptr = x86.ALEAL
+ cmpptr = x86.ACMPL
+ typedefs[0].Sameas = gc.TINT32
+ typedefs[1].Sameas = gc.TUINT32
+ typedefs[2].Sameas = gc.TUINT32
+ }
+
+}
+
+func main() {
+ gc.Thearch.Thechar = thechar
+ gc.Thearch.Thestring = thestring
+ gc.Thearch.Thelinkarch = thelinkarch
+ gc.Thearch.Typedefs = typedefs
+ gc.Thearch.REGSP = x86.REGSP
+ gc.Thearch.REGCTXT = x86.REGCTXT
+ gc.Thearch.MAXWIDTH = MAXWIDTH
+ gc.Thearch.Anyregalloc = anyregalloc
+ gc.Thearch.Betypeinit = betypeinit
+ gc.Thearch.Bgen = bgen
+ gc.Thearch.Cgen = cgen
+ gc.Thearch.Cgen_call = cgen_call
+ gc.Thearch.Cgen_callinter = cgen_callinter
+ gc.Thearch.Cgen_ret = cgen_ret
+ gc.Thearch.Clearfat = clearfat
+ gc.Thearch.Defframe = defframe
+ gc.Thearch.Excise = excise
+ gc.Thearch.Expandchecks = expandchecks
+ gc.Thearch.Gclean = gclean
+ gc.Thearch.Ginit = ginit
+ gc.Thearch.Gins = gins
+ gc.Thearch.Ginscall = ginscall
+ gc.Thearch.Igen = igen
+ gc.Thearch.Linkarchinit = linkarchinit
+ gc.Thearch.Peep = peep
+ gc.Thearch.Proginfo = proginfo
+ gc.Thearch.Regalloc = regalloc
+ gc.Thearch.Regfree = regfree
+ gc.Thearch.Regtyp = regtyp
+ gc.Thearch.Sameaddr = sameaddr
+ gc.Thearch.Smallindir = smallindir
+ gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Excludedregs = excludedregs
+ gc.Thearch.RtoB = RtoB
+ gc.Thearch.FtoB = FtoB
+ gc.Thearch.BtoR = BtoR
+ gc.Thearch.BtoF = BtoF
+ gc.Thearch.Optoas = optoas
+ gc.Thearch.Doregbits = doregbits
+ gc.Thearch.Regnames = regnames
+
+ gc.Main()
+}
diff --git a/src/cmd/new6g/gg.go b/src/cmd/new6g/gg.go
new file mode 100644
index 0000000000..2deed5deb9
--- /dev/null
+++ b/src/cmd/new6g/gg.go
@@ -0,0 +1,24 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "cmd/internal/obj/x86"
+import "cmd/internal/gc"
+
+var reg [x86.MAXREG]uint8
+
+var panicdiv *gc.Node
+
+/*
+ * cgen.c
+ */
+
+/*
+ * list.c
+ */
diff --git a/src/cmd/new6g/ggen.go b/src/cmd/new6g/ggen.go
new file mode 100644
index 0000000000..2e323c923f
--- /dev/null
+++ b/src/cmd/new6g/ggen.go
@@ -0,0 +1,1169 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+import "cmd/internal/gc"
+
+func defframe(ptxt *obj.Prog) {
+ var frame uint32
+ var ax uint32
+ var p *obj.Prog
+ var hi int64
+ var lo int64
+ var l *gc.NodeList
+ var n *gc.Node
+
+ // fill in argument size, stack size
+ ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+ ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+ frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+ ptxt.To.Offset = int64(frame)
+
+ // insert code to zero ambiguously live variables
+ // so that the garbage collector only sees initialized values
+ // when it looks for pointers.
+ p = ptxt
+
+ hi = 0
+ lo = hi
+ ax = 0
+
+ // iterate through declarations - they are sorted in decreasing xoffset order.
+ for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if !(n.Needzero != 0) {
+ continue
+ }
+ if n.Class != gc.PAUTO {
+ gc.Fatal("needzero class %d", n.Class)
+ }
+ if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+ gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ }
+
+ if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
+ // merge with range we already have
+ lo = n.Xoffset
+
+ continue
+ }
+
+ // zero old range
+ p = zerorange(p, int64(frame), lo, hi, &ax)
+
+ // set new range
+ hi = n.Xoffset + n.Type.Width
+
+ lo = n.Xoffset
+ }
+
+ // zero final range
+ zerorange(p, int64(frame), lo, hi, &ax)
+}
+
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
+ var cnt int64
+ var i int64
+
+ cnt = hi - lo
+ if cnt == 0 {
+ return p
+ }
+ if *ax == 0 {
+ p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ *ax = 1
+ }
+
+ if cnt%int64(gc.Widthreg) != 0 {
+ // should only happen with nacl
+ if cnt%int64(gc.Widthptr) != 0 {
+ gc.Fatal("zerorange count not a multiple of widthptr %d", cnt)
+ }
+ p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
+ lo += int64(gc.Widthptr)
+ cnt -= int64(gc.Widthptr)
+ }
+
+ if cnt <= int64(4*gc.Widthreg) {
+ for i = 0; i < cnt; i += int64(gc.Widthreg) {
+ p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
+ }
+ } else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
+ p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
+ p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 2*(128-cnt/int64(gc.Widthreg)))
+ p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+ } else {
+ p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
+ p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
+ p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ }
+
+ return p
+}
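+
+// For example (illustrative): with Widthreg == 8, a 24-byte range takes
+// the cnt <= 4*Widthreg path above and becomes three MOVQ stores of AX
+// at frame+lo, frame+lo+8, and frame+lo+16, after AX has been zeroed once.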
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+ var q *obj.Prog
+ q = gc.Ctxt.NewProg()
+ gc.Clearp(q)
+ q.As = int16(as)
+ q.Lineno = p.Lineno
+ q.From.Type = int16(ftype)
+ q.From.Reg = int16(freg)
+ q.From.Offset = foffset
+ q.To.Type = int16(ttype)
+ q.To.Reg = int16(treg)
+ q.To.Offset = toffset
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
+
+/*
+ * generate:
+ * call f
+ * proc=-1 normal call but no return
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ * proc=3 normal call to C pointer (not Go func value)
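+ *
+ * For example, a plain call f() arrives here with proc=0, go f() with
+ * proc=1, and defer f() with proc=2.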
+ */
+func ginscall(f *gc.Node, proc int) {
+ var p *obj.Prog
+ var reg gc.Node
+ var stk gc.Node
+ var r1 gc.Node
+ var extra int32
+
+ if f.Type != nil {
+ extra = 0
+ if proc == 1 || proc == 2 {
+ extra = 2 * int32(gc.Widthptr)
+ }
+ gc.Setmaxarg(f.Type, extra)
+ }
+
+ switch proc {
+ default:
+ gc.Fatal("ginscall: bad proc %d", proc)
+
+ case 0, // normal call
+ -1: // normal call but no return
+ if f.Op == gc.ONAME && f.Class == gc.PFUNC {
+ if f == gc.Deferreturn {
+ // Deferred calls will appear to be returning to
+ // the CALL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction byte before the return PC.
+ // To avoid that being an unrelated instruction,
+ // insert an x86 NOP that we will have the right line number for.
+ // x86 NOP 0x90 is really XCHG AX, AX; use that description
+ // because the NOP pseudo-instruction would be removed by
+ // the linker.
+ gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
+
+ gins(x86.AXCHGL, &reg, &reg)
+ }
+
+ p = gins(obj.ACALL, nil, f)
+ gc.Afunclit(&p.To, f)
+ if proc == -1 || gc.Noreturn(p) != 0 {
+ gins(obj.AUNDEF, nil, nil)
+ }
+ break
+ }
+
+ gc.Nodreg(&reg, gc.Types[gc.Tptr], x86.REG_DX)
+ gc.Nodreg(&r1, gc.Types[gc.Tptr], x86.REG_BX)
+ gmove(f, &reg)
+ reg.Op = gc.OINDREG
+ gmove(&reg, &r1)
+ reg.Op = gc.OREGISTER
+ gins(obj.ACALL, &reg, &r1)
+
+ case 3: // normal call of c function pointer
+ gins(obj.ACALL, nil, f)
+
+ case 1, // call in new proc (go)
+ 2: // deferred call (defer)
+ stk = gc.Node{}
+
+ stk.Op = gc.OINDREG
+ stk.Val.U.Reg = x86.REG_SP
+ stk.Xoffset = 0
+
+ if gc.Widthptr == 8 {
+ // size of arguments at 0(SP)
+ ginscon(x86.AMOVQ, int64(gc.Argsize(f.Type)), &stk)
+
+ // FuncVal* at 8(SP)
+ stk.Xoffset = int64(gc.Widthptr)
+
+ gc.Nodreg(&reg, gc.Types[gc.TINT64], x86.REG_AX)
+ gmove(f, &reg)
+ gins(x86.AMOVQ, &reg, &stk)
+ } else {
+ // size of arguments at 0(SP)
+ ginscon(x86.AMOVL, int64(gc.Argsize(f.Type)), &stk)
+
+ // FuncVal* at 4(SP)
+ stk.Xoffset = int64(gc.Widthptr)
+
+ gc.Nodreg(&reg, gc.Types[gc.TINT32], x86.REG_AX)
+ gmove(f, &reg)
+ gins(x86.AMOVL, &reg, &stk)
+ }
+
+ if proc == 1 {
+ ginscall(gc.Newproc, 0)
+ } else {
+ if !(gc.Hasdefer != 0) {
+ gc.Fatal("hasdefer=0 but has defer")
+ }
+ ginscall(gc.Deferproc, 0)
+ }
+
+ if proc == 2 {
+ gc.Nodreg(&reg, gc.Types[gc.TINT32], x86.REG_AX)
+ gins(x86.ATESTL, &reg, &reg)
+ p = gc.Gbranch(x86.AJEQ, nil, +1)
+ cgen_ret(nil)
+ gc.Patch(p, gc.Pc)
+ }
+ }
+}
+
+/*
+ * n is call to interface method.
+ * generate res = n.
+ */
+func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
+ var i *gc.Node
+ var f *gc.Node
+ var tmpi gc.Node
+ var nodi gc.Node
+ var nodo gc.Node
+ var nodr gc.Node
+ var nodsp gc.Node
+
+ i = n.Left
+ if i.Op != gc.ODOTINTER {
+ gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
+ }
+
+ f = i.Right // field
+ if f.Op != gc.ONAME {
+ gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
+ }
+
+ i = i.Left // interface
+
+ if !(i.Addable != 0) {
+ gc.Tempname(&tmpi, i.Type)
+ cgen(i, &tmpi)
+ i = &tmpi
+ }
+
+ gc.Genlist(n.List) // assign the args
+
+ // i is now addable, prepare an indirected
+ // register to hold its address.
+ igen(i, &nodi, res) // REG = &inter
+
+ gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], x86.REG_SP)
+
+ nodsp.Xoffset = 0
+ if proc != 0 {
+ nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
+ }
+ nodi.Type = gc.Types[gc.Tptr]
+ nodi.Xoffset += int64(gc.Widthptr)
+ cgen(&nodi, &nodsp) // {0, 8(nacl), or 16}(SP) = 8(REG) -- i.data
+
+ regalloc(&nodo, gc.Types[gc.Tptr], res)
+
+ nodi.Type = gc.Types[gc.Tptr]
+ nodi.Xoffset -= int64(gc.Widthptr)
+ cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
+ regfree(&nodi)
+
+ regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
+ if n.Left.Xoffset == gc.BADWIDTH {
+ gc.Fatal("cgen_callinter: badwidth")
+ }
+ gc.Cgen_checknil(&nodo) // in case offset is huge
+ nodo.Op = gc.OINDREG
+ nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
+ if proc == 0 {
+ // plain call: use direct c function pointer - more efficient
+ cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
+ proc = 3
+ } else {
+ // go/defer. generate go func value.
+ gins(x86.ALEAQ, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
+ }
+
+ nodr.Type = n.Left.Type
+ ginscall(&nodr, proc)
+
+ regfree(&nodr)
+ regfree(&nodo)
+}
+
+/*
+ * generate function call;
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ */
+func cgen_call(n *gc.Node, proc int) {
+ var t *gc.Type
+ var nod gc.Node
+ var afun gc.Node
+
+ if n == nil {
+ return
+ }
+
+ if n.Left.Ullman >= gc.UINF {
+ // if name involves a fn call
+ // precompute the address of the fn
+ gc.Tempname(&afun, gc.Types[gc.Tptr])
+
+ cgen(n.Left, &afun)
+ }
+
+ gc.Genlist(n.List) // assign the args
+ t = n.Left.Type
+
+ // call tempname pointer
+ if n.Left.Ullman >= gc.UINF {
+ regalloc(&nod, gc.Types[gc.Tptr], nil)
+ gc.Cgen_as(&nod, &afun)
+ nod.Type = t
+ ginscall(&nod, proc)
+ regfree(&nod)
+ return
+ }
+
+ // call pointer
+ if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+ regalloc(&nod, gc.Types[gc.Tptr], nil)
+ gc.Cgen_as(&nod, n.Left)
+ nod.Type = t
+ ginscall(&nod, proc)
+ regfree(&nod)
+ return
+ }
+
+ // call direct
+ n.Left.Method = 1
+
+ ginscall(n.Left, proc)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = return value from call.
+ */
+func cgen_callret(n *gc.Node, res *gc.Node) {
+ var nod gc.Node
+ var fp *gc.Type
+ var t *gc.Type
+ var flist gc.Iter
+
+ t = n.Left.Type
+ if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
+ t = t.Type
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if fp == nil {
+ gc.Fatal("cgen_callret: nil")
+ }
+
+ nod = gc.Node{}
+ nod.Op = gc.OINDREG
+ nod.Val.U.Reg = x86.REG_SP
+ nod.Addable = 1
+
+ nod.Xoffset = fp.Width
+ nod.Type = fp.Type
+ gc.Cgen_as(res, &nod)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = &return value from call.
+ */
+func cgen_aret(n *gc.Node, res *gc.Node) {
+ var nod1 gc.Node
+ var nod2 gc.Node
+ var fp *gc.Type
+ var t *gc.Type
+ var flist gc.Iter
+
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if fp == nil {
+ gc.Fatal("cgen_aret: nil")
+ }
+
+ nod1 = gc.Node{}
+ nod1.Op = gc.OINDREG
+ nod1.Val.U.Reg = x86.REG_SP
+ nod1.Addable = 1
+
+ nod1.Xoffset = fp.Width
+ nod1.Type = fp.Type
+
+ if res.Op != gc.OREGISTER {
+ regalloc(&nod2, gc.Types[gc.Tptr], res)
+ gins(leaptr, &nod1, &nod2)
+ gins(movptr, &nod2, res)
+ regfree(&nod2)
+ } else {
+ gins(leaptr, &nod1, res)
+ }
+}
+
+/*
+ * generate return.
+ * n->left is assignments to return values.
+ */
+func cgen_ret(n *gc.Node) {
+ var p *obj.Prog
+
+ if n != nil {
+ gc.Genlist(n.List) // copy out args
+ }
+ if gc.Hasdefer != 0 {
+ ginscall(gc.Deferreturn, 0)
+ }
+ gc.Genlist(gc.Curfn.Exit)
+ p = gins(obj.ARET, nil, nil)
+ if n != nil && n.Op == gc.ORETJMP {
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(n.Left.Sym)
+ }
+}
+
+/*
+ * generate division.
+ * generates one of:
+ * res = nl / nr
+ * res = nl % nr
+ * according to op.
+ */
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var a int
+ var check int
+ var n3 gc.Node
+ var n4 gc.Node
+ var t *gc.Type
+ var t0 *gc.Type
+ var ax gc.Node
+ var dx gc.Node
+ var ax1 gc.Node
+ var n31 gc.Node
+ var oldax gc.Node
+ var olddx gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ // Have to be careful about handling
+ // most negative int divided by -1 correctly.
+ // The hardware will trap.
+ // Also the byte divide instruction needs AH,
+ // which we otherwise don't have to deal with.
+ // Easiest way to avoid for int8, int16: use int32.
+ // For int32 and int64, use explicit test.
+ // Could use int64 hw for int32.
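+ // For example, int64(-1<<63) / -1 would need +1<<63, which does not
+ // fit in int64, so IDIVQ faults; the check below instead computes
+ // a/(-1) as -a and a%(-1) as 0.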
+ t = nl.Type
+
+ t0 = t
+ check = 0
+ if gc.Issigned[t.Etype] != 0 {
+ check = 1
+ if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
+ check = 0
+ } else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+ check = 0
+ }
+ }
+
+ if t.Width < 4 {
+ if gc.Issigned[t.Etype] != 0 {
+ t = gc.Types[gc.TINT32]
+ } else {
+ t = gc.Types[gc.TUINT32]
+ }
+ check = 0
+ }
+
+ a = optoas(op, t)
+
+ regalloc(&n3, t0, nil)
+ if nl.Ullman >= nr.Ullman {
+ savex(x86.REG_AX, &ax, &oldax, res, t0)
+ cgen(nl, &ax)
+ regalloc(&ax, t0, &ax) // mark ax live during cgen
+ cgen(nr, &n3)
+ regfree(&ax)
+ } else {
+ cgen(nr, &n3)
+ savex(x86.REG_AX, &ax, &oldax, res, t0)
+ cgen(nl, &ax)
+ }
+
+ if t != t0 {
+ // Convert
+ ax1 = ax
+
+ n31 = n3
+ ax.Type = t
+ n3.Type = t
+ gmove(&ax1, &ax)
+ gmove(&n31, &n3)
+ }
+
+ p2 = nil
+ if gc.Nacl {
+ // Native Client does not relay the divide-by-zero trap
+ // to the executing program, so we must insert a check
+ // for ourselves.
+ gc.Nodconst(&n4, t, 0)
+
+ gins(optoas(gc.OCMP, t), &n3, &n4)
+ p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ if panicdiv == nil {
+ panicdiv = gc.Sysfunc("panicdivide")
+ }
+ ginscall(panicdiv, -1)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if check != 0 {
+ gc.Nodconst(&n4, t, -1)
+ gins(optoas(gc.OCMP, t), &n3, &n4)
+ p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ if op == gc.ODIV {
+ // a / (-1) is -a.
+ gins(optoas(gc.OMINUS, t), nil, &ax)
+
+ gmove(&ax, res)
+ } else {
+ // a % (-1) is 0.
+ gc.Nodconst(&n4, t, 0)
+
+ gmove(&n4, res)
+ }
+
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ savex(x86.REG_DX, &dx, &olddx, res, t)
+ if !(gc.Issigned[t.Etype] != 0) {
+ gc.Nodconst(&n4, t, 0)
+ gmove(&n4, &dx)
+ } else {
+ gins(optoas(gc.OEXTEND, t), nil, nil)
+ }
+ gins(a, &n3, nil)
+ regfree(&n3)
+ if op == gc.ODIV {
+ gmove(&ax, res)
+ } else {
+ gmove(&dx, res)
+ }
+ restx(&dx, &olddx)
+ if check != 0 {
+ gc.Patch(p2, gc.Pc)
+ }
+ restx(&ax, &oldax)
+}
+
+/*
+ * register dr is one of the special ones (AX, CX, DI, SI, etc.).
+ * we need to use it. if it is already allocated as a temporary
+ * (r > 1; can only happen if a routine like sgen passed a
+ * special as cgen's res and then cgen used regalloc to reuse
+ * it as its own temporary), then move it for now to another
+ * register. caller must call restx to move it back.
+ * the move is not necessary if dr == res, because res is
+ * known to be dead.
+ */
+func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
+ var r int
+
+ r = int(reg[dr])
+
+ // save current ax and dx if they are live
+ // and not the destination
+ *oldx = gc.Node{}
+
+ gc.Nodreg(x, t, dr)
+ if r > 1 && !(gc.Samereg(x, res) != 0) {
+ regalloc(oldx, gc.Types[gc.TINT64], nil)
+ x.Type = gc.Types[gc.TINT64]
+ gmove(x, oldx)
+ x.Type = t
+ oldx.Ostk = int32(r) // squirrel away old r value
+ reg[dr] = 1
+ }
+}
+
+func restx(x *gc.Node, oldx *gc.Node) {
+ if oldx.Op != 0 {
+ x.Type = gc.Types[gc.TINT64]
+ reg[x.Val.U.Reg] = uint8(oldx.Ostk)
+ gmove(oldx, x)
+ regfree(oldx)
+ }
+}
+
+/*
+ * generate division according to op, one of:
+ * res = nl / nr
+ * res = nl % nr
+ */
+func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var w int
+ var a int
+ var m gc.Magic
+
+ if nr.Op != gc.OLITERAL {
+ goto longdiv
+ }
+ w = int(nl.Type.Width * 8)
+
+ // Front end handled 32-bit division. We only need to handle 64-bit.
+ // Try to replace the division with a multiplication by roughly (2^w)/d;
+ // see Hacker's Delight, chapter 10.
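+ // (For example, unsigned division by 3 uses the multiplier
+ // m = 0xAAAAAAAAAAAAAAAB with shift s = 1, so that n/3 equals the
+ // high 64 bits of n*m shifted right by 1, for every uint64 n.)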
+ switch gc.Simtype[nl.Type.Etype] {
+ default:
+ goto longdiv
+
+ case gc.TUINT64:
+ m.W = w
+ m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ gc.Umagic(&m)
+ if m.Bad != 0 {
+ break
+ }
+ if op == gc.OMOD {
+ goto longmod
+ }
+
+ cgenr(nl, &n1, nil)
+ gc.Nodconst(&n2, nl.Type, int64(m.Um))
+ regalloc(&n3, nl.Type, res)
+ cgen_hmul(&n1, &n2, &n3)
+
+ if m.Ua != 0 {
+ // need to add numerator accounting for overflow
+ gins(optoas(gc.OADD, nl.Type), &n1, &n3)
+
+ gc.Nodconst(&n2, nl.Type, 1)
+ gins(optoas(gc.ORROTC, nl.Type), &n2, &n3)
+ gc.Nodconst(&n2, nl.Type, int64(m.S)-1)
+ gins(optoas(gc.ORSH, nl.Type), &n2, &n3)
+ } else {
+ gc.Nodconst(&n2, nl.Type, int64(m.S))
+ gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift dx
+ }
+
+ gmove(&n3, res)
+ regfree(&n1)
+ regfree(&n3)
+ return
+
+ case gc.TINT64:
+ m.W = w
+ m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
+ gc.Smagic(&m)
+ if m.Bad != 0 {
+ break
+ }
+ if op == gc.OMOD {
+ goto longmod
+ }
+
+ cgenr(nl, &n1, res)
+ gc.Nodconst(&n2, nl.Type, m.Sm)
+ regalloc(&n3, nl.Type, nil)
+ cgen_hmul(&n1, &n2, &n3)
+
+ if m.Sm < 0 {
+ // need to add numerator
+ gins(optoas(gc.OADD, nl.Type), &n1, &n3)
+ }
+
+ gc.Nodconst(&n2, nl.Type, int64(m.S))
+ gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift n3
+
+ gc.Nodconst(&n2, nl.Type, int64(w)-1)
+
+ gins(optoas(gc.ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
+ gins(optoas(gc.OSUB, nl.Type), &n1, &n3) // added
+
+ if m.Sd < 0 {
+ // this could probably be removed
+ // by factoring it into the multiplier
+ gins(optoas(gc.OMINUS, nl.Type), nil, &n3)
+ }
+
+ gmove(&n3, res)
+ regfree(&n1)
+ regfree(&n3)
+ return
+ }
+
+ goto longdiv
+
+ // division and mod using (slow) hardware instruction
+longdiv:
+ dodiv(op, nl, nr, res)
+
+ return
+
+ // mod using formula A%B = A-(A/B*B) but
+ // we know that there is a fast algorithm for A/B
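+ // (for example, 100 % 7 == 100 - (100/7)*7 == 100 - 14*7 == 2)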
+longmod:
+ regalloc(&n1, nl.Type, res)
+
+ cgen(nl, &n1)
+ regalloc(&n2, nl.Type, nil)
+ cgen_div(gc.ODIV, &n1, nr, &n2)
+ a = optoas(gc.OMUL, nl.Type)
+ if w == 8 {
+ // use 2-operand 16-bit multiply
+ // because there is no 2-operand 8-bit multiply
+ a = x86.AIMULW
+ }
+
+ if !(gc.Smallintconst(nr) != 0) {
+ regalloc(&n3, nl.Type, nil)
+ cgen(nr, &n3)
+ gins(a, &n3, &n2)
+ regfree(&n3)
+ } else {
+ gins(a, nr, &n2)
+ }
+ gins(optoas(gc.OSUB, nl.Type), &n2, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ regfree(&n2)
+}
+
+/*
+ * generate high multiply:
+ * res = (nl*nr) >> width
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var t *gc.Type
+ var a int
+ var n1 gc.Node
+ var n2 gc.Node
+ var ax gc.Node
+ var dx gc.Node
+ var tmp *gc.Node
+
+ t = nl.Type
+ a = optoas(gc.OHMUL, t)
+ if nl.Ullman < nr.Ullman {
+ tmp = nl
+ nl = nr
+ nr = tmp
+ }
+
+ cgenr(nl, &n1, res)
+ cgenr(nr, &n2, nil)
+ gc.Nodreg(&ax, t, x86.REG_AX)
+ gmove(&n1, &ax)
+ gins(a, &n2, nil)
+ regfree(&n2)
+ regfree(&n1)
+
+ if t.Width == 1 {
+ // byte multiply behaves differently.
+ gc.Nodreg(&ax, t, x86.REG_AH)
+
+ gc.Nodreg(&dx, t, x86.REG_DX)
+ gmove(&ax, &dx)
+ }
+
+ gc.Nodreg(&dx, t, x86.REG_DX)
+ gmove(&dx, res)
+}
+
+/*
+ * generate shift according to op, one of:
+ * res = nl << nr
+ * res = nl >> nr
+ */
+func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var n4 gc.Node
+ var n5 gc.Node
+ var cx gc.Node
+ var oldcx gc.Node
+ var a int
+ var rcx int
+ var p1 *obj.Prog
+ var sc uint64
+ var tcount *gc.Type
+
+ a = optoas(op, nl.Type)
+
+ if nr.Op == gc.OLITERAL {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+ sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ if sc >= uint64(nl.Type.Width*8) {
+ // large shift gets 2 shifts by width-1
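+ // (for example, a uint64 shifted right by 70 becomes two SHRQ $63
+ // instructions, producing 0, while a signed int64 keeps its sign fill)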
+ gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+
+ gins(a, &n3, &n1)
+ gins(a, &n3, &n1)
+ } else {
+ gins(a, nr, &n1)
+ }
+ gmove(&n1, res)
+ regfree(&n1)
+ return
+ }
+
+ if nl.Ullman >= gc.UINF {
+ gc.Tempname(&n4, nl.Type)
+ cgen(nl, &n4)
+ nl = &n4
+ }
+
+ if nr.Ullman >= gc.UINF {
+ gc.Tempname(&n5, nr.Type)
+ cgen(nr, &n5)
+ nr = &n5
+ }
+
+ rcx = int(reg[x86.REG_CX])
+ gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
+
+ // Allow either uint32 or uint64 as shift type,
+ // to avoid unnecessary conversion from uint32 to uint64
+ // just to do the comparison.
+ tcount = gc.Types[gc.Simtype[nr.Type.Etype]]
+
+ if tcount.Etype < gc.TUINT32 {
+ tcount = gc.Types[gc.TUINT32]
+ }
+
+ regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
+ regalloc(&n3, tcount, &n1) // to clear high bits of CX
+
+ gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
+
+ oldcx = gc.Node{}
+ if rcx > 0 && !(gc.Samereg(&cx, res) != 0) {
+ regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
+ gmove(&cx, &oldcx)
+ }
+
+ cx.Type = tcount
+
+ if gc.Samereg(&cx, res) != 0 {
+ regalloc(&n2, nl.Type, nil)
+ } else {
+ regalloc(&n2, nl.Type, res)
+ }
+ if nl.Ullman >= nr.Ullman {
+ cgen(nl, &n2)
+ cgen(nr, &n1)
+ gmove(&n1, &n3)
+ } else {
+ cgen(nr, &n1)
+ gmove(&n1, &n3)
+ cgen(nl, &n2)
+ }
+
+ regfree(&n3)
+
+ // test and fix up large shifts
+ if !(bounded != 0) {
+ gc.Nodconst(&n3, tcount, nl.Type.Width*8)
+ gins(optoas(gc.OCMP, tcount), &n1, &n3)
+ p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
+ if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
+ gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+ gins(a, &n3, &n2)
+ } else {
+ gc.Nodconst(&n3, nl.Type, 0)
+ gmove(&n3, &n2)
+ }
+
+ gc.Patch(p1, gc.Pc)
+ }
+
+ gins(a, &n1, &n2)
+
+ if oldcx.Op != 0 {
+ cx.Type = gc.Types[gc.TUINT64]
+ gmove(&oldcx, &cx)
+ regfree(&oldcx)
+ }
+
+ gmove(&n2, res)
+
+ regfree(&n1)
+ regfree(&n2)
+}
+
+/*
+ * generate byte multiply:
+ * res = nl * nr
+ * there is no 2-operand byte multiply instruction so
+ * we do a full-width multiplication and truncate afterwards.
+ */
+func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+ var n2 gc.Node
+ var n1b gc.Node
+ var n2b gc.Node
+ var tmp *gc.Node
+ var t *gc.Type
+ var a int
+
+ // largest ullman on left.
+ if nl.Ullman < nr.Ullman {
+ tmp = nl
+ nl = nr
+ nr = tmp
+ }
+
+ // generate operands in "8-bit" registers.
+ regalloc(&n1b, nl.Type, res)
+
+ cgen(nl, &n1b)
+ regalloc(&n2b, nr.Type, nil)
+ cgen(nr, &n2b)
+
+ // perform full-width multiplication.
+ t = gc.Types[gc.TUINT64]
+
+ if gc.Issigned[nl.Type.Etype] != 0 {
+ t = gc.Types[gc.TINT64]
+ }
+ gc.Nodreg(&n1, t, int(n1b.Val.U.Reg))
+ gc.Nodreg(&n2, t, int(n2b.Val.U.Reg))
+ a = optoas(op, t)
+ gins(a, &n2, &n1)
+
+ // truncate.
+ gmove(&n1, res)
+
+ regfree(&n1b)
+ regfree(&n2b)
+}
+
+func clearfat(nl *gc.Node) {
+ var w int64
+ var c int64
+ var q int64
+ var n1 gc.Node
+ var oldn1 gc.Node
+ var ax gc.Node
+ var oldax gc.Node
+ var di gc.Node
+ var z gc.Node
+ var p *obj.Prog
+
+ /* clear a fat object */
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nclearfat", nl)
+ }
+
+ w = nl.Type.Width
+
+ // Avoid taking the address for simple enough types.
+ if componentgen(nil, nl) != 0 {
+ return
+ }
+
+ c = w % 8 // bytes
+ q = w / 8 // quads
+
+ if q < 4 {
+ // Write sequence of MOV 0, off(base) instead of using STOSQ.
+ // The hope is that although the code will be slightly longer,
+ // the MOVs will have no dependencies and pipeline better
+ // than the unrolled STOSQ loop.
+ // NOTE: Must use agen, not igen, so that optimizer sees address
+ // being taken. We are not writing on field boundaries.
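+ // For example, w == 20 gives q == 2 and c == 4: two MOVQ $0 stores
+ // followed by one MOVL $0 store.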
+ agenr(nl, &n1, nil)
+
+ n1.Op = gc.OINDREG
+ gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
+ for ; q > 0; q-- {
+ n1.Type = z.Type
+ gins(x86.AMOVQ, &z, &n1)
+ n1.Xoffset += 8
+ }
+
+ if c >= 4 {
+ gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
+ n1.Type = z.Type
+ gins(x86.AMOVL, &z, &n1)
+ n1.Xoffset += 4
+ c -= 4
+ }
+
+ gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
+ for ; c > 0; c-- {
+ n1.Type = z.Type
+ gins(x86.AMOVB, &z, &n1)
+ n1.Xoffset++
+ }
+
+ regfree(&n1)
+ return
+ }
+
+ savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
+ agen(nl, &n1)
+
+ savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
+ gconreg(x86.AMOVL, 0, x86.REG_AX)
+
+ if q > 128 || gc.Nacl {
+ gconreg(movptr, q, x86.REG_CX)
+ gins(x86.AREP, nil, nil) // repeat
+ gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
+ } else {
+ p = gins(obj.ADUFFZERO, nil, nil)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+
+ // 2 and 128 = magic constants: see ../../runtime/asm_amd64.s
+ p.To.Offset = 2 * (128 - q)
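+ // (each duffzero entry is a 2-byte STOSQ, so entering at byte offset
+ // 2*(128-q) executes exactly the last q stores; see the file above)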
+ }
+
+ z = ax
+ di = n1
+ if w >= 8 && c >= 4 {
+ di.Op = gc.OINDREG
+ z.Type = gc.Types[gc.TINT64]
+ di.Type = z.Type
+ p = gins(x86.AMOVQ, &z, &di)
+ p.To.Scale = 1
+ p.To.Offset = c - 8
+ } else if c >= 4 {
+ di.Op = gc.OINDREG
+ z.Type = gc.Types[gc.TINT32]
+ di.Type = z.Type
+ p = gins(x86.AMOVL, &z, &di)
+ if c > 4 {
+ p = gins(x86.AMOVL, &z, &di)
+ p.To.Scale = 1
+ p.To.Offset = c - 4
+ }
+ } else {
+ for c > 0 {
+ gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
+ c--
+ }
+ }
+
+ restx(&n1, &oldn1)
+ restx(&ax, &oldax)
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ for p = firstp; p != nil; p = p.Link {
+ if p.As != obj.ACHECKNIL {
+ continue
+ }
+ if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+ gc.Warnl(int(p.Lineno), "generated nil check")
+ }
+
+ // check is
+ // CMP arg, $0
+ // JNE 2(PC) (likely)
+ // MOV AX, 0
+ p1 = gc.Ctxt.NewProg()
+
+ p2 = gc.Ctxt.NewProg()
+ gc.Clearp(p1)
+ gc.Clearp(p2)
+ p1.Link = p2
+ p2.Link = p.Link
+ p.Link = p1
+ p1.Lineno = p.Lineno
+ p2.Lineno = p.Lineno
+ p1.Pc = 9999
+ p2.Pc = 9999
+ p.As = int16(cmpptr)
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = 0
+ p1.As = x86.AJNE
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = 1 // likely
+ p1.To.Type = obj.TYPE_BRANCH
+ p1.To.U.Branch = p2.Link
+
+ // crash by write to memory address 0.
+ // if possible, since we know arg is 0, use 0(arg),
+ // which will be shorter to encode than plain 0.
+ p2.As = x86.AMOVL
+
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = x86.REG_AX
+ if regtyp(&p.From) != 0 {
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = p.From.Reg
+ } else {
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = x86.REG_NONE
+ }
+
+ p2.To.Offset = 0
+ }
+}
diff --git a/src/cmd/new6g/gsubr.go b/src/cmd/new6g/gsubr.go
new file mode 100644
index 0000000000..380c9e900d
--- /dev/null
+++ b/src/cmd/new6g/gsubr.go
@@ -0,0 +1,1755 @@
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+// TODO(rsc): Can make this bigger if we move
+// the text segment up higher in 6l for all GOOS.
+// At the same time, can raise StackBig in ../../runtime/stack.h.
+var unmappedzero int64 = 4096
+
+var resvd = []int{
+ x86.REG_DI, // for movstring
+ x86.REG_SI, // for movstring
+
+ x86.REG_AX, // for divide
+ x86.REG_CX, // for shift
+ x86.REG_DX, // for divide
+ x86.REG_SP, // for stack
+}
+
+func ginit() {
+ var i int
+
+ for i = 0; i < len(reg); i++ {
+ reg[i] = 1
+ }
+ for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+ reg[i] = 0
+ }
+ for i = x86.REG_X0; i <= x86.REG_X15; i++ {
+ reg[i] = 0
+ }
+
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]]++
+ }
+
+ if gc.Nacl {
+ reg[x86.REG_BP]++
+ reg[x86.REG_R15]++
+ } else if obj.Framepointer_enabled != 0 {
+ // BP is part of the calling convention when the frame pointer is enabled.
+ reg[x86.REG_BP]++
+ }
+}
+
+func gclean() {
+ var i int
+
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]]--
+ }
+ if gc.Nacl {
+ reg[x86.REG_BP]--
+ reg[x86.REG_R15]--
+ } else if obj.Framepointer_enabled != 0 {
+ reg[x86.REG_BP]--
+ }
+
+ for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+ if reg[i] != 0 {
+ gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
+ }
+ }
+ for i = x86.REG_X0; i <= x86.REG_X15; i++ {
+ if reg[i] != 0 {
+ gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
+ }
+ }
+}
+
+func anyregalloc() int {
+ var i int
+ var j int
+
+loop:
+ for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+ if reg[i] == 0 {
+ continue
+ }
+ for j = 0; j < len(resvd); j++ {
+ if resvd[j] == i {
+ continue loop
+ }
+ }
+ return 1
+ }
+
+ return 0
+}
+
+var regpc [x86.REG_R15 + 1 - x86.REG_AX]uint32
+
+/*
+ * allocate register of type t, leave in n.
+ * if o != N, o is desired fixed register.
+ * caller must regfree(n).
+ */
+func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
+ var i int
+ var et int
+
+ if t == nil {
+ gc.Fatal("regalloc: t nil")
+ }
+ et = int(gc.Simtype[t.Etype])
+
+ switch et {
+ case gc.TINT8,
+ gc.TUINT8,
+ gc.TINT16,
+ gc.TUINT16,
+ gc.TINT32,
+ gc.TUINT32,
+ gc.TINT64,
+ gc.TUINT64,
+ gc.TPTR32,
+ gc.TPTR64,
+ gc.TBOOL:
+ if o != nil && o.Op == gc.OREGISTER {
+ i = int(o.Val.U.Reg)
+ if i >= x86.REG_AX && i <= x86.REG_R15 {
+ goto out
+ }
+ }
+
+ for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+ if reg[i] == 0 {
+ regpc[i-x86.REG_AX] = uint32(obj.Getcallerpc(&n))
+ goto out
+ }
+ }
+
+ gc.Flusherrors()
+ for i = 0; i+x86.REG_AX <= x86.REG_R15; i++ {
+ fmt.Printf("%d %p\n", i, regpc[i])
+ }
+ gc.Fatal("out of fixed registers")
+ fallthrough
+
+ case gc.TFLOAT32,
+ gc.TFLOAT64:
+ if o != nil && o.Op == gc.OREGISTER {
+ i = int(o.Val.U.Reg)
+ if i >= x86.REG_X0 && i <= x86.REG_X15 {
+ goto out
+ }
+ }
+
+ for i = x86.REG_X0; i <= x86.REG_X15; i++ {
+ if reg[i] == 0 {
+ goto out
+ }
+ }
+ gc.Fatal("out of floating registers")
+ fallthrough
+
+ case gc.TCOMPLEX64,
+ gc.TCOMPLEX128:
+ gc.Tempname(n, t)
+ return
+ }
+
+ gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
+ return
+
+out:
+ reg[i]++
+ gc.Nodreg(n, t, i)
+}
+
+func regfree(n *gc.Node) {
+ var i int
+
+ if n.Op == gc.ONAME {
+ return
+ }
+ if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
+ gc.Fatal("regfree: not a register")
+ }
+ i = int(n.Val.U.Reg)
+ if i == x86.REG_SP {
+ return
+ }
+ if i < 0 || i >= len(reg) {
+ gc.Fatal("regfree: reg out of range")
+ }
+ if reg[i] <= 0 {
+ gc.Fatal("regfree: reg not allocated")
+ }
+ reg[i]--
+ if reg[i] == 0 && x86.REG_AX <= i && i <= x86.REG_R15 {
+ regpc[i-x86.REG_AX] = 0
+ }
+}
+
+/*
+ * generate
+ * as $c, reg
+ */
+func gconreg(as int, c int64, reg int) {
+ var nr gc.Node
+
+ switch as {
+ case x86.AADDL,
+ x86.AMOVL,
+ x86.ALEAL:
+ gc.Nodreg(&nr, gc.Types[gc.TINT32], reg)
+
+ default:
+ gc.Nodreg(&nr, gc.Types[gc.TINT64], reg)
+ }
+
+ ginscon(as, c, &nr)
+}
+
+/*
+ * generate
+ * as $c, n
+ */
+func ginscon(as int, c int64, n2 *gc.Node) {
+ var n1 gc.Node
+ var ntmp gc.Node
+
+ switch as {
+ case x86.AADDL,
+ x86.AMOVL,
+ x86.ALEAL:
+ gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
+
+ default:
+ gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+ }
+
+ if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
+ // cannot have 64-bit immediate in ADD, etc.
+ // instead, MOV into register first.
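+ // (For example, ADDQ $(1<<32), n2 is not encodable; it becomes
+ // MOVQ $(1<<32), ntmp followed by ADDQ ntmp, n2.)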
+ regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+ gins(x86.AMOVQ, &n1, &ntmp)
+ gins(as, &ntmp, n2)
+ regfree(&ntmp)
+ return
+ }
+
+ gins(as, &n1, n2)
+}
+
+/*
+ * set up nodes representing 2^63
+ */
+var bigi gc.Node
+
+var bigf gc.Node
+
+var bignodes_did int
+
+func bignodes() {
+ if bignodes_did != 0 {
+ return
+ }
+ bignodes_did = 1
+
+ gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 1)
+ gc.Mpshiftfix(bigi.Val.U.Xval, 63)
+
+ bigf = bigi
+ bigf.Type = gc.Types[gc.TFLOAT64]
+ bigf.Val.Ctype = gc.CTFLT
+ bigf.Val.U.Fval = new(gc.Mpflt)
+ gc.Mpmovefixflt(bigf.Val.U.Fval, bigi.Val.U.Xval)
+}
+
+/*
+ * generate move:
+ * t = f
+ * hard part is conversions.
+ */
+func gmove(f *gc.Node, t *gc.Node) {
+ var a int
+ var ft int
+ var tt int
+ var cvt *gc.Type
+ var r1 gc.Node
+ var r2 gc.Node
+ var r3 gc.Node
+ var r4 gc.Node
+ var zero gc.Node
+ var one gc.Node
+ var con gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ if gc.Debug['M'] != 0 {
+ fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+ }
+
+ ft = gc.Simsimtype(f.Type)
+ tt = gc.Simsimtype(t.Type)
+ cvt = t.Type
+
+ if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
+ gc.Complexmove(f, t)
+ return
+ }
+
+ // cannot have two memory operands
+ if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ goto hard
+ }
+
+ // convert constant to desired type
+ if f.Op == gc.OLITERAL {
+ gc.Convconst(&con, t.Type, &f.Val)
+ f = &con
+ ft = tt // so big switch will choose a simple mov
+
+ // some constants can't move directly to memory.
+ if gc.Ismem(t) != 0 {
+ // float constants come from memory.
+ if gc.Isfloat[tt] != 0 {
+ goto hard
+ }
+
+ // 64-bit immediates are really 32-bit sign-extended
+ // unless moving into a register.
+ if gc.Isint[tt] != 0 {
+ if gc.Mpcmpfixfix(con.Val.U.Xval, gc.Minintval[gc.TINT32]) < 0 {
+ goto hard
+ }
+ if gc.Mpcmpfixfix(con.Val.U.Xval, gc.Maxintval[gc.TINT32]) > 0 {
+ goto hard
+ }
+ }
+ }
+ }
+
+ // value -> value copy, only one memory operand.
+ // figure out the instruction to use.
+ // break out of switch for one-instruction gins.
+ // goto rdst for "destination must be register".
+ // goto hard for "convert to cvt type first".
+ // otherwise handle and return.
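+ // The switch keys on both types at once, packed as ft<<16 | tt; for
+ // example, the pair (TINT32, TINT64) selects the sign-extending
+ // AMOVLQSX case below.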
+
+ switch uint32(ft)<<16 | uint32(tt) {
+ default:
+ gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+ fallthrough
+
+ /*
+ * integer copy and truncate
+ */
+ case gc.TINT8<<16 | gc.TINT8, // same size
+ gc.TINT8<<16 | gc.TUINT8,
+ gc.TUINT8<<16 | gc.TINT8,
+ gc.TUINT8<<16 | gc.TUINT8,
+ gc.TINT16<<16 | gc.TINT8,
+ // truncate
+ gc.TUINT16<<16 | gc.TINT8,
+ gc.TINT32<<16 | gc.TINT8,
+ gc.TUINT32<<16 | gc.TINT8,
+ gc.TINT64<<16 | gc.TINT8,
+ gc.TUINT64<<16 | gc.TINT8,
+ gc.TINT16<<16 | gc.TUINT8,
+ gc.TUINT16<<16 | gc.TUINT8,
+ gc.TINT32<<16 | gc.TUINT8,
+ gc.TUINT32<<16 | gc.TUINT8,
+ gc.TINT64<<16 | gc.TUINT8,
+ gc.TUINT64<<16 | gc.TUINT8:
+ a = x86.AMOVB
+
+ case gc.TINT16<<16 | gc.TINT16, // same size
+ gc.TINT16<<16 | gc.TUINT16,
+ gc.TUINT16<<16 | gc.TINT16,
+ gc.TUINT16<<16 | gc.TUINT16,
+ gc.TINT32<<16 | gc.TINT16,
+ // truncate
+ gc.TUINT32<<16 | gc.TINT16,
+ gc.TINT64<<16 | gc.TINT16,
+ gc.TUINT64<<16 | gc.TINT16,
+ gc.TINT32<<16 | gc.TUINT16,
+ gc.TUINT32<<16 | gc.TUINT16,
+ gc.TINT64<<16 | gc.TUINT16,
+ gc.TUINT64<<16 | gc.TUINT16:
+ a = x86.AMOVW
+
+ case gc.TINT32<<16 | gc.TINT32, // same size
+ gc.TINT32<<16 | gc.TUINT32,
+ gc.TUINT32<<16 | gc.TINT32,
+ gc.TUINT32<<16 | gc.TUINT32:
+ a = x86.AMOVL
+
+ case gc.TINT64<<16 | gc.TINT32, // truncate
+ gc.TUINT64<<16 | gc.TINT32,
+ gc.TINT64<<16 | gc.TUINT32,
+ gc.TUINT64<<16 | gc.TUINT32:
+ a = x86.AMOVQL
+
+ case gc.TINT64<<16 | gc.TINT64, // same size
+ gc.TINT64<<16 | gc.TUINT64,
+ gc.TUINT64<<16 | gc.TINT64,
+ gc.TUINT64<<16 | gc.TUINT64:
+ a = x86.AMOVQ
+
+ /*
+ * integer up-conversions
+ */
+ case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+ gc.TINT8<<16 | gc.TUINT16:
+ a = x86.AMOVBWSX
+
+ goto rdst
+
+ case gc.TINT8<<16 | gc.TINT32,
+ gc.TINT8<<16 | gc.TUINT32:
+ a = x86.AMOVBLSX
+ goto rdst
+
+ case gc.TINT8<<16 | gc.TINT64,
+ gc.TINT8<<16 | gc.TUINT64:
+ a = x86.AMOVBQSX
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+ gc.TUINT8<<16 | gc.TUINT16:
+ a = x86.AMOVBWZX
+
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TINT32,
+ gc.TUINT8<<16 | gc.TUINT32:
+ a = x86.AMOVBLZX
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TINT64,
+ gc.TUINT8<<16 | gc.TUINT64:
+ a = x86.AMOVBQZX
+ goto rdst
+
+ case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+ gc.TINT16<<16 | gc.TUINT32:
+ a = x86.AMOVWLSX
+
+ goto rdst
+
+ case gc.TINT16<<16 | gc.TINT64,
+ gc.TINT16<<16 | gc.TUINT64:
+ a = x86.AMOVWQSX
+ goto rdst
+
+ case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+ gc.TUINT16<<16 | gc.TUINT32:
+ a = x86.AMOVWLZX
+
+ goto rdst
+
+ case gc.TUINT16<<16 | gc.TINT64,
+ gc.TUINT16<<16 | gc.TUINT64:
+ a = x86.AMOVWQZX
+ goto rdst
+
+ case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+ gc.TINT32<<16 | gc.TUINT64:
+ a = x86.AMOVLQSX
+
+ goto rdst
+
+ // AMOVL into a register zeros the top of the register,
+ // so this is not always necessary, but if we rely on AMOVL
+ // the optimizer is almost certain to screw with us.
+ case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+ gc.TUINT32<<16 | gc.TUINT64:
+ a = x86.AMOVLQZX
+
+ goto rdst
+
+ /*
+ * float to integer
+ */
+ case gc.TFLOAT32<<16 | gc.TINT32:
+ a = x86.ACVTTSS2SL
+
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TINT32:
+ a = x86.ACVTTSD2SL
+ goto rdst
+
+ case gc.TFLOAT32<<16 | gc.TINT64:
+ a = x86.ACVTTSS2SQ
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TINT64:
+ a = x86.ACVTTSD2SQ
+ goto rdst
+
+ // convert via int32.
+ case gc.TFLOAT32<<16 | gc.TINT16,
+ gc.TFLOAT32<<16 | gc.TINT8,
+ gc.TFLOAT32<<16 | gc.TUINT16,
+ gc.TFLOAT32<<16 | gc.TUINT8,
+ gc.TFLOAT64<<16 | gc.TINT16,
+ gc.TFLOAT64<<16 | gc.TINT8,
+ gc.TFLOAT64<<16 | gc.TUINT16,
+ gc.TFLOAT64<<16 | gc.TUINT8:
+ cvt = gc.Types[gc.TINT32]
+
+ goto hard
+
+ // convert via int64.
+ case gc.TFLOAT32<<16 | gc.TUINT32,
+ gc.TFLOAT64<<16 | gc.TUINT32:
+ cvt = gc.Types[gc.TINT64]
+
+ goto hard
+
+ // algorithm is:
+ // if small enough, use native float64 -> int64 conversion.
+ // otherwise, subtract 2^63, convert, and add it back.
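+ // (For example, f = 2^63 + 2048 is out of int64 range; we convert
+ // f - 2^63 = 2048 natively and then XOR in bigi = 2^63 to restore
+ // the high bit of the result.)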
+ case gc.TFLOAT32<<16 | gc.TUINT64,
+ gc.TFLOAT64<<16 | gc.TUINT64:
+ a = x86.ACVTTSS2SQ
+
+ if ft == gc.TFLOAT64 {
+ a = x86.ACVTTSD2SQ
+ }
+ bignodes()
+ regalloc(&r1, gc.Types[ft], nil)
+ regalloc(&r2, gc.Types[tt], t)
+ regalloc(&r3, gc.Types[ft], nil)
+ regalloc(&r4, gc.Types[tt], nil)
+ gins(optoas(gc.OAS, f.Type), f, &r1)
+ gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
+ p1 = gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
+ gins(a, &r1, &r2)
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ gins(optoas(gc.OAS, f.Type), &bigf, &r3)
+ gins(optoas(gc.OSUB, f.Type), &r3, &r1)
+ gins(a, &r1, &r2)
+ gins(x86.AMOVQ, &bigi, &r4)
+ gins(x86.AXORQ, &r4, &r2)
+ gc.Patch(p2, gc.Pc)
+ gmove(&r2, t)
+ regfree(&r4)
+ regfree(&r3)
+ regfree(&r2)
+ regfree(&r1)
+ return
+
+ /*
+ * integer to float
+ */
+ case gc.TINT32<<16 | gc.TFLOAT32:
+ a = x86.ACVTSL2SS
+
+ goto rdst
+
+ case gc.TINT32<<16 | gc.TFLOAT64:
+ a = x86.ACVTSL2SD
+ goto rdst
+
+ case gc.TINT64<<16 | gc.TFLOAT32:
+ a = x86.ACVTSQ2SS
+ goto rdst
+
+ case gc.TINT64<<16 | gc.TFLOAT64:
+ a = x86.ACVTSQ2SD
+ goto rdst
+
+ // convert via int32
+ case gc.TINT16<<16 | gc.TFLOAT32,
+ gc.TINT16<<16 | gc.TFLOAT64,
+ gc.TINT8<<16 | gc.TFLOAT32,
+ gc.TINT8<<16 | gc.TFLOAT64,
+ gc.TUINT16<<16 | gc.TFLOAT32,
+ gc.TUINT16<<16 | gc.TFLOAT64,
+ gc.TUINT8<<16 | gc.TFLOAT32,
+ gc.TUINT8<<16 | gc.TFLOAT64:
+ cvt = gc.Types[gc.TINT32]
+
+ goto hard
+
+ // convert via int64.
+ case gc.TUINT32<<16 | gc.TFLOAT32,
+ gc.TUINT32<<16 | gc.TFLOAT64:
+ cvt = gc.Types[gc.TINT64]
+
+ goto hard
+
+ // algorithm is:
+ // if small enough, use the native int64 -> float conversion.
+ // otherwise, halve (rounding to odd?), convert, and double.
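+ // (For example, if the high bit of u is set, v = u>>1 | u&1 halves u
+ // while preserving the low bit, so that float(v) + float(v) rounds the
+ // same way a direct conversion of u would.)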
+ case gc.TUINT64<<16 | gc.TFLOAT32,
+ gc.TUINT64<<16 | gc.TFLOAT64:
+ a = x86.ACVTSQ2SS
+
+ if tt == gc.TFLOAT64 {
+ a = x86.ACVTSQ2SD
+ }
+ gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0)
+ gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
+ regalloc(&r1, f.Type, f)
+ regalloc(&r2, t.Type, t)
+ regalloc(&r3, f.Type, nil)
+ regalloc(&r4, f.Type, nil)
+ gmove(f, &r1)
+ gins(x86.ACMPQ, &r1, &zero)
+ p1 = gc.Gbranch(x86.AJLT, nil, +1)
+ gins(a, &r1, &r2)
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ gmove(&r1, &r3)
+ gins(x86.ASHRQ, &one, &r3)
+ gmove(&r1, &r4)
+ gins(x86.AANDL, &one, &r4)
+ gins(x86.AORQ, &r4, &r3)
+ gins(a, &r3, &r2)
+ gins(optoas(gc.OADD, t.Type), &r2, &r2)
+ gc.Patch(p2, gc.Pc)
+ gmove(&r2, t)
+ regfree(&r4)
+ regfree(&r3)
+ regfree(&r2)
+ regfree(&r1)
+ return
+
+ /*
+ * float to float
+ */
+ case gc.TFLOAT32<<16 | gc.TFLOAT32:
+ a = x86.AMOVSS
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT64:
+ a = x86.AMOVSD
+
+ case gc.TFLOAT32<<16 | gc.TFLOAT64:
+ a = x86.ACVTSS2SD
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT32:
+ a = x86.ACVTSD2SS
+ goto rdst
+ }
+
+ gins(a, f, t)
+ return
+
+ // requires register destination
+rdst:
+ regalloc(&r1, t.Type, t)
+
+ gins(a, f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ // requires register intermediate
+hard:
+ regalloc(&r1, cvt, t)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+}
+
+func samaddr(f *gc.Node, t *gc.Node) int {
+ if f.Op != t.Op {
+ return 0
+ }
+
+ switch f.Op {
+ case gc.OREGISTER:
+ if f.Val.U.Reg != t.Val.U.Reg {
+ break
+ }
+ return 1
+ }
+
+ return 0
+}
+
+/*
+ * generate one instruction:
+ * as f, t
+ */
+func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+ var w int32
+ var p *obj.Prog
+ // Node nod;
+
+ var af obj.Addr
+ var at obj.Addr
+
+ // if(f != N && f->op == OINDEX) {
+ // regalloc(&nod, &regnode, Z);
+ // v = constnode.vconst;
+ // cgen(f->right, &nod);
+ // constnode.vconst = v;
+ // idx.reg = nod.reg;
+ // regfree(&nod);
+ // }
+ // if(t != N && t->op == OINDEX) {
+ // regalloc(&nod, &regnode, Z);
+ // v = constnode.vconst;
+ // cgen(t->right, &nod);
+ // constnode.vconst = v;
+ // idx.reg = nod.reg;
+ // regfree(&nod);
+ // }
+
+ switch as {
+ case x86.AMOVB,
+ x86.AMOVW,
+ x86.AMOVL,
+ x86.AMOVQ,
+ x86.AMOVSS,
+ x86.AMOVSD:
+ if f != nil && t != nil && samaddr(f, t) != 0 {
+ return nil
+ }
+
+ case x86.ALEAQ:
+ if f != nil && gc.Isconst(f, gc.CTNIL) != 0 {
+ gc.Fatal("gins LEAQ nil %v", gc.Tconv(f.Type, 0))
+ }
+ }
+
+ af = obj.Addr{}
+ at = obj.Addr{}
+ if f != nil {
+ gc.Naddr(f, &af, 1)
+ }
+ if t != nil {
+ gc.Naddr(t, &at, 1)
+ }
+ p = gc.Prog(as)
+ if f != nil {
+ p.From = af
+ }
+ if t != nil {
+ p.To = at
+ }
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v\n", p)
+ }
+
+ w = 0
+ switch as {
+ case x86.AMOVB:
+ w = 1
+
+ case x86.AMOVW:
+ w = 2
+
+ case x86.AMOVL:
+ w = 4
+
+ case x86.AMOVQ:
+ w = 8
+ }
+
+ if w != 0 && ((f != nil && af.Width < int64(w)) || (t != nil && at.Width > int64(w))) {
+ gc.Dump("f", f)
+ gc.Dump("t", t)
+ gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width)
+ }
+
+ if p.To.Type == obj.TYPE_ADDR && w > 0 {
+ gc.Fatal("bad use of addr: %v", p)
+ }
+
+ return p
+}
+
+func fixlargeoffset(n *gc.Node) {
+ var a gc.Node
+
+ if n == nil {
+ return
+ }
+ if n.Op != gc.OINDREG {
+ return
+ }
+ if n.Val.U.Reg == x86.REG_SP { // stack offset cannot be large
+ return
+ }
+ if n.Xoffset != int64(int32(n.Xoffset)) {
+ // offset too large, add to register instead.
+ a = *n
+
+ a.Op = gc.OREGISTER
+ a.Type = gc.Types[gc.Tptr]
+ a.Xoffset = 0
+ gc.Cgen_checknil(&a)
+ ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
+ n.Xoffset = 0
+ }
+}
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
+func optoas(op int, t *gc.Type) int {
+ var a int
+
+ if t == nil {
+ gc.Fatal("optoas: t is nil")
+ }
+
+ a = obj.AXXX
+ switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+ default:
+ gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+ case gc.OADDR<<16 | gc.TPTR32:
+ a = x86.ALEAL
+
+ case gc.OADDR<<16 | gc.TPTR64:
+ a = x86.ALEAQ
+
+ case gc.OEQ<<16 | gc.TBOOL,
+ gc.OEQ<<16 | gc.TINT8,
+ gc.OEQ<<16 | gc.TUINT8,
+ gc.OEQ<<16 | gc.TINT16,
+ gc.OEQ<<16 | gc.TUINT16,
+ gc.OEQ<<16 | gc.TINT32,
+ gc.OEQ<<16 | gc.TUINT32,
+ gc.OEQ<<16 | gc.TINT64,
+ gc.OEQ<<16 | gc.TUINT64,
+ gc.OEQ<<16 | gc.TPTR32,
+ gc.OEQ<<16 | gc.TPTR64,
+ gc.OEQ<<16 | gc.TFLOAT32,
+ gc.OEQ<<16 | gc.TFLOAT64:
+ a = x86.AJEQ
+
+ case gc.ONE<<16 | gc.TBOOL,
+ gc.ONE<<16 | gc.TINT8,
+ gc.ONE<<16 | gc.TUINT8,
+ gc.ONE<<16 | gc.TINT16,
+ gc.ONE<<16 | gc.TUINT16,
+ gc.ONE<<16 | gc.TINT32,
+ gc.ONE<<16 | gc.TUINT32,
+ gc.ONE<<16 | gc.TINT64,
+ gc.ONE<<16 | gc.TUINT64,
+ gc.ONE<<16 | gc.TPTR32,
+ gc.ONE<<16 | gc.TPTR64,
+ gc.ONE<<16 | gc.TFLOAT32,
+ gc.ONE<<16 | gc.TFLOAT64:
+ a = x86.AJNE
+
+ case gc.OLT<<16 | gc.TINT8,
+ gc.OLT<<16 | gc.TINT16,
+ gc.OLT<<16 | gc.TINT32,
+ gc.OLT<<16 | gc.TINT64:
+ a = x86.AJLT
+
+ case gc.OLT<<16 | gc.TUINT8,
+ gc.OLT<<16 | gc.TUINT16,
+ gc.OLT<<16 | gc.TUINT32,
+ gc.OLT<<16 | gc.TUINT64:
+ a = x86.AJCS
+
+ case gc.OLE<<16 | gc.TINT8,
+ gc.OLE<<16 | gc.TINT16,
+ gc.OLE<<16 | gc.TINT32,
+ gc.OLE<<16 | gc.TINT64:
+ a = x86.AJLE
+
+ case gc.OLE<<16 | gc.TUINT8,
+ gc.OLE<<16 | gc.TUINT16,
+ gc.OLE<<16 | gc.TUINT32,
+ gc.OLE<<16 | gc.TUINT64:
+ a = x86.AJLS
+
+ case gc.OGT<<16 | gc.TINT8,
+ gc.OGT<<16 | gc.TINT16,
+ gc.OGT<<16 | gc.TINT32,
+ gc.OGT<<16 | gc.TINT64:
+ a = x86.AJGT
+
+ case gc.OGT<<16 | gc.TUINT8,
+ gc.OGT<<16 | gc.TUINT16,
+ gc.OGT<<16 | gc.TUINT32,
+ gc.OGT<<16 | gc.TUINT64,
+ gc.OLT<<16 | gc.TFLOAT32,
+ gc.OLT<<16 | gc.TFLOAT64:
+ a = x86.AJHI
+
+ case gc.OGE<<16 | gc.TINT8,
+ gc.OGE<<16 | gc.TINT16,
+ gc.OGE<<16 | gc.TINT32,
+ gc.OGE<<16 | gc.TINT64:
+ a = x86.AJGE
+
+ case gc.OGE<<16 | gc.TUINT8,
+ gc.OGE<<16 | gc.TUINT16,
+ gc.OGE<<16 | gc.TUINT32,
+ gc.OGE<<16 | gc.TUINT64,
+ gc.OLE<<16 | gc.TFLOAT32,
+ gc.OLE<<16 | gc.TFLOAT64:
+ a = x86.AJCC
+
+ case gc.OCMP<<16 | gc.TBOOL,
+ gc.OCMP<<16 | gc.TINT8,
+ gc.OCMP<<16 | gc.TUINT8:
+ a = x86.ACMPB
+
+ case gc.OCMP<<16 | gc.TINT16,
+ gc.OCMP<<16 | gc.TUINT16:
+ a = x86.ACMPW
+
+ case gc.OCMP<<16 | gc.TINT32,
+ gc.OCMP<<16 | gc.TUINT32,
+ gc.OCMP<<16 | gc.TPTR32:
+ a = x86.ACMPL
+
+ case gc.OCMP<<16 | gc.TINT64,
+ gc.OCMP<<16 | gc.TUINT64,
+ gc.OCMP<<16 | gc.TPTR64:
+ a = x86.ACMPQ
+
+ case gc.OCMP<<16 | gc.TFLOAT32:
+ a = x86.AUCOMISS
+
+ case gc.OCMP<<16 | gc.TFLOAT64:
+ a = x86.AUCOMISD
+
+ case gc.OAS<<16 | gc.TBOOL,
+ gc.OAS<<16 | gc.TINT8,
+ gc.OAS<<16 | gc.TUINT8:
+ a = x86.AMOVB
+
+ case gc.OAS<<16 | gc.TINT16,
+ gc.OAS<<16 | gc.TUINT16:
+ a = x86.AMOVW
+
+ case gc.OAS<<16 | gc.TINT32,
+ gc.OAS<<16 | gc.TUINT32,
+ gc.OAS<<16 | gc.TPTR32:
+ a = x86.AMOVL
+
+ case gc.OAS<<16 | gc.TINT64,
+ gc.OAS<<16 | gc.TUINT64,
+ gc.OAS<<16 | gc.TPTR64:
+ a = x86.AMOVQ
+
+ case gc.OAS<<16 | gc.TFLOAT32:
+ a = x86.AMOVSS
+
+ case gc.OAS<<16 | gc.TFLOAT64:
+ a = x86.AMOVSD
+
+ case gc.OADD<<16 | gc.TINT8,
+ gc.OADD<<16 | gc.TUINT8:
+ a = x86.AADDB
+
+ case gc.OADD<<16 | gc.TINT16,
+ gc.OADD<<16 | gc.TUINT16:
+ a = x86.AADDW
+
+ case gc.OADD<<16 | gc.TINT32,
+ gc.OADD<<16 | gc.TUINT32,
+ gc.OADD<<16 | gc.TPTR32:
+ a = x86.AADDL
+
+ case gc.OADD<<16 | gc.TINT64,
+ gc.OADD<<16 | gc.TUINT64,
+ gc.OADD<<16 | gc.TPTR64:
+ a = x86.AADDQ
+
+ case gc.OADD<<16 | gc.TFLOAT32:
+ a = x86.AADDSS
+
+ case gc.OADD<<16 | gc.TFLOAT64:
+ a = x86.AADDSD
+
+ case gc.OSUB<<16 | gc.TINT8,
+ gc.OSUB<<16 | gc.TUINT8:
+ a = x86.ASUBB
+
+ case gc.OSUB<<16 | gc.TINT16,
+ gc.OSUB<<16 | gc.TUINT16:
+ a = x86.ASUBW
+
+ case gc.OSUB<<16 | gc.TINT32,
+ gc.OSUB<<16 | gc.TUINT32,
+ gc.OSUB<<16 | gc.TPTR32:
+ a = x86.ASUBL
+
+ case gc.OSUB<<16 | gc.TINT64,
+ gc.OSUB<<16 | gc.TUINT64,
+ gc.OSUB<<16 | gc.TPTR64:
+ a = x86.ASUBQ
+
+ case gc.OSUB<<16 | gc.TFLOAT32:
+ a = x86.ASUBSS
+
+ case gc.OSUB<<16 | gc.TFLOAT64:
+ a = x86.ASUBSD
+
+ case gc.OINC<<16 | gc.TINT8,
+ gc.OINC<<16 | gc.TUINT8:
+ a = x86.AINCB
+
+ case gc.OINC<<16 | gc.TINT16,
+ gc.OINC<<16 | gc.TUINT16:
+ a = x86.AINCW
+
+ case gc.OINC<<16 | gc.TINT32,
+ gc.OINC<<16 | gc.TUINT32,
+ gc.OINC<<16 | gc.TPTR32:
+ a = x86.AINCL
+
+ case gc.OINC<<16 | gc.TINT64,
+ gc.OINC<<16 | gc.TUINT64,
+ gc.OINC<<16 | gc.TPTR64:
+ a = x86.AINCQ
+
+ case gc.ODEC<<16 | gc.TINT8,
+ gc.ODEC<<16 | gc.TUINT8:
+ a = x86.ADECB
+
+ case gc.ODEC<<16 | gc.TINT16,
+ gc.ODEC<<16 | gc.TUINT16:
+ a = x86.ADECW
+
+ case gc.ODEC<<16 | gc.TINT32,
+ gc.ODEC<<16 | gc.TUINT32,
+ gc.ODEC<<16 | gc.TPTR32:
+ a = x86.ADECL
+
+ case gc.ODEC<<16 | gc.TINT64,
+ gc.ODEC<<16 | gc.TUINT64,
+ gc.ODEC<<16 | gc.TPTR64:
+ a = x86.ADECQ
+
+ case gc.OMINUS<<16 | gc.TINT8,
+ gc.OMINUS<<16 | gc.TUINT8:
+ a = x86.ANEGB
+
+ case gc.OMINUS<<16 | gc.TINT16,
+ gc.OMINUS<<16 | gc.TUINT16:
+ a = x86.ANEGW
+
+ case gc.OMINUS<<16 | gc.TINT32,
+ gc.OMINUS<<16 | gc.TUINT32,
+ gc.OMINUS<<16 | gc.TPTR32:
+ a = x86.ANEGL
+
+ case gc.OMINUS<<16 | gc.TINT64,
+ gc.OMINUS<<16 | gc.TUINT64,
+ gc.OMINUS<<16 | gc.TPTR64:
+ a = x86.ANEGQ
+
+ case gc.OAND<<16 | gc.TINT8,
+ gc.OAND<<16 | gc.TUINT8:
+ a = x86.AANDB
+
+ case gc.OAND<<16 | gc.TINT16,
+ gc.OAND<<16 | gc.TUINT16:
+ a = x86.AANDW
+
+ case gc.OAND<<16 | gc.TINT32,
+ gc.OAND<<16 | gc.TUINT32,
+ gc.OAND<<16 | gc.TPTR32:
+ a = x86.AANDL
+
+ case gc.OAND<<16 | gc.TINT64,
+ gc.OAND<<16 | gc.TUINT64,
+ gc.OAND<<16 | gc.TPTR64:
+ a = x86.AANDQ
+
+ case gc.OOR<<16 | gc.TINT8,
+ gc.OOR<<16 | gc.TUINT8:
+ a = x86.AORB
+
+ case gc.OOR<<16 | gc.TINT16,
+ gc.OOR<<16 | gc.TUINT16:
+ a = x86.AORW
+
+ case gc.OOR<<16 | gc.TINT32,
+ gc.OOR<<16 | gc.TUINT32,
+ gc.OOR<<16 | gc.TPTR32:
+ a = x86.AORL
+
+ case gc.OOR<<16 | gc.TINT64,
+ gc.OOR<<16 | gc.TUINT64,
+ gc.OOR<<16 | gc.TPTR64:
+ a = x86.AORQ
+
+ case gc.OXOR<<16 | gc.TINT8,
+ gc.OXOR<<16 | gc.TUINT8:
+ a = x86.AXORB
+
+ case gc.OXOR<<16 | gc.TINT16,
+ gc.OXOR<<16 | gc.TUINT16:
+ a = x86.AXORW
+
+ case gc.OXOR<<16 | gc.TINT32,
+ gc.OXOR<<16 | gc.TUINT32,
+ gc.OXOR<<16 | gc.TPTR32:
+ a = x86.AXORL
+
+ case gc.OXOR<<16 | gc.TINT64,
+ gc.OXOR<<16 | gc.TUINT64,
+ gc.OXOR<<16 | gc.TPTR64:
+ a = x86.AXORQ
+
+ case gc.OLROT<<16 | gc.TINT8,
+ gc.OLROT<<16 | gc.TUINT8:
+ a = x86.AROLB
+
+ case gc.OLROT<<16 | gc.TINT16,
+ gc.OLROT<<16 | gc.TUINT16:
+ a = x86.AROLW
+
+ case gc.OLROT<<16 | gc.TINT32,
+ gc.OLROT<<16 | gc.TUINT32,
+ gc.OLROT<<16 | gc.TPTR32:
+ a = x86.AROLL
+
+ case gc.OLROT<<16 | gc.TINT64,
+ gc.OLROT<<16 | gc.TUINT64,
+ gc.OLROT<<16 | gc.TPTR64:
+ a = x86.AROLQ
+
+ case gc.OLSH<<16 | gc.TINT8,
+ gc.OLSH<<16 | gc.TUINT8:
+ a = x86.ASHLB
+
+ case gc.OLSH<<16 | gc.TINT16,
+ gc.OLSH<<16 | gc.TUINT16:
+ a = x86.ASHLW
+
+ case gc.OLSH<<16 | gc.TINT32,
+ gc.OLSH<<16 | gc.TUINT32,
+ gc.OLSH<<16 | gc.TPTR32:
+ a = x86.ASHLL
+
+ case gc.OLSH<<16 | gc.TINT64,
+ gc.OLSH<<16 | gc.TUINT64,
+ gc.OLSH<<16 | gc.TPTR64:
+ a = x86.ASHLQ
+
+ case gc.ORSH<<16 | gc.TUINT8:
+ a = x86.ASHRB
+
+ case gc.ORSH<<16 | gc.TUINT16:
+ a = x86.ASHRW
+
+ case gc.ORSH<<16 | gc.TUINT32,
+ gc.ORSH<<16 | gc.TPTR32:
+ a = x86.ASHRL
+
+ case gc.ORSH<<16 | gc.TUINT64,
+ gc.ORSH<<16 | gc.TPTR64:
+ a = x86.ASHRQ
+
+ case gc.ORSH<<16 | gc.TINT8:
+ a = x86.ASARB
+
+ case gc.ORSH<<16 | gc.TINT16:
+ a = x86.ASARW
+
+ case gc.ORSH<<16 | gc.TINT32:
+ a = x86.ASARL
+
+ case gc.ORSH<<16 | gc.TINT64:
+ a = x86.ASARQ
+
+ case gc.ORROTC<<16 | gc.TINT8,
+ gc.ORROTC<<16 | gc.TUINT8:
+ a = x86.ARCRB
+
+ case gc.ORROTC<<16 | gc.TINT16,
+ gc.ORROTC<<16 | gc.TUINT16:
+ a = x86.ARCRW
+
+ case gc.ORROTC<<16 | gc.TINT32,
+ gc.ORROTC<<16 | gc.TUINT32:
+ a = x86.ARCRL
+
+ case gc.ORROTC<<16 | gc.TINT64,
+ gc.ORROTC<<16 | gc.TUINT64:
+ a = x86.ARCRQ
+
+ case gc.OHMUL<<16 | gc.TINT8,
+ gc.OMUL<<16 | gc.TINT8,
+ gc.OMUL<<16 | gc.TUINT8:
+ a = x86.AIMULB
+
+ case gc.OHMUL<<16 | gc.TINT16,
+ gc.OMUL<<16 | gc.TINT16,
+ gc.OMUL<<16 | gc.TUINT16:
+ a = x86.AIMULW
+
+ case gc.OHMUL<<16 | gc.TINT32,
+ gc.OMUL<<16 | gc.TINT32,
+ gc.OMUL<<16 | gc.TUINT32,
+ gc.OMUL<<16 | gc.TPTR32:
+ a = x86.AIMULL
+
+ case gc.OHMUL<<16 | gc.TINT64,
+ gc.OMUL<<16 | gc.TINT64,
+ gc.OMUL<<16 | gc.TUINT64,
+ gc.OMUL<<16 | gc.TPTR64:
+ a = x86.AIMULQ
+
+ case gc.OHMUL<<16 | gc.TUINT8:
+ a = x86.AMULB
+
+ case gc.OHMUL<<16 | gc.TUINT16:
+ a = x86.AMULW
+
+ case gc.OHMUL<<16 | gc.TUINT32,
+ gc.OHMUL<<16 | gc.TPTR32:
+ a = x86.AMULL
+
+ case gc.OHMUL<<16 | gc.TUINT64,
+ gc.OHMUL<<16 | gc.TPTR64:
+ a = x86.AMULQ
+
+ case gc.OMUL<<16 | gc.TFLOAT32:
+ a = x86.AMULSS
+
+ case gc.OMUL<<16 | gc.TFLOAT64:
+ a = x86.AMULSD
+
+ case gc.ODIV<<16 | gc.TINT8,
+ gc.OMOD<<16 | gc.TINT8:
+ a = x86.AIDIVB
+
+ case gc.ODIV<<16 | gc.TUINT8,
+ gc.OMOD<<16 | gc.TUINT8:
+ a = x86.ADIVB
+
+ case gc.ODIV<<16 | gc.TINT16,
+ gc.OMOD<<16 | gc.TINT16:
+ a = x86.AIDIVW
+
+ case gc.ODIV<<16 | gc.TUINT16,
+ gc.OMOD<<16 | gc.TUINT16:
+ a = x86.ADIVW
+
+ case gc.ODIV<<16 | gc.TINT32,
+ gc.OMOD<<16 | gc.TINT32:
+ a = x86.AIDIVL
+
+ case gc.ODIV<<16 | gc.TUINT32,
+ gc.ODIV<<16 | gc.TPTR32,
+ gc.OMOD<<16 | gc.TUINT32,
+ gc.OMOD<<16 | gc.TPTR32:
+ a = x86.ADIVL
+
+ case gc.ODIV<<16 | gc.TINT64,
+ gc.OMOD<<16 | gc.TINT64:
+ a = x86.AIDIVQ
+
+ case gc.ODIV<<16 | gc.TUINT64,
+ gc.ODIV<<16 | gc.TPTR64,
+ gc.OMOD<<16 | gc.TUINT64,
+ gc.OMOD<<16 | gc.TPTR64:
+ a = x86.ADIVQ
+
+ case gc.OEXTEND<<16 | gc.TINT16:
+ a = x86.ACWD
+
+ case gc.OEXTEND<<16 | gc.TINT32:
+ a = x86.ACDQ
+
+ case gc.OEXTEND<<16 | gc.TINT64:
+ a = x86.ACQO
+
+ case gc.ODIV<<16 | gc.TFLOAT32:
+ a = x86.ADIVSS
+
+ case gc.ODIV<<16 | gc.TFLOAT64:
+ a = x86.ADIVSD
+ }
+
+ return a
+}
+
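+// optoasKey is an illustrative helper, not part of the original conversion:
+// it makes explicit how the case values above are packed, with the operation
+// in the high 16 bits and the (simplified) type in the low 16, so that each
+// (op, type) pair selects exactly one machine instruction.
+func optoasKey(op int, t *gc.Type) uint32 {
+	// e.g. gc.OADD<<16 | gc.TINT32 selects x86.AADDL in the switch above.
+	return uint32(op)<<16 | uint32(gc.Simtype[t.Etype])
+}
+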
+const (
+ ODynam = 1 << 0
+ OAddable = 1 << 1
+)
+
+var clean [20]gc.Node
+
+var cleani int = 0
+
+func xgen(n *gc.Node, a *gc.Node, o int) int {
+ regalloc(a, gc.Types[gc.Tptr], nil)
+
+	if o&ODynam != 0 && n.Addable != 0 && n.Op != gc.OINDREG && n.Op != gc.OREGISTER {
+		return 1
+	}
+
+ agen(n, a)
+ return 0
+}
+
+func sudoclean() {
+ if clean[cleani-1].Op != gc.OEMPTY {
+ regfree(&clean[cleani-1])
+ }
+ if clean[cleani-2].Op != gc.OEMPTY {
+ regfree(&clean[cleani-2])
+ }
+ cleani -= 2
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return 0 on failure, 1 on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
+ var o int
+ var i int
+ var oary [10]int64
+ var v int64
+ var w int64
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var n4 gc.Node
+ var nn *gc.Node
+ var l *gc.Node
+ var r *gc.Node
+ var reg *gc.Node
+ var reg1 *gc.Node
+ var p1 *obj.Prog
+ var t *gc.Type
+
+ if n.Type == nil {
+ return 0
+ }
+
+ *a = obj.Addr{}
+
+ switch n.Op {
+ case gc.OLITERAL:
+ if !(gc.Isconst(n, gc.CTINT) != 0) {
+ break
+ }
+ v = gc.Mpgetfix(n.Val.U.Xval)
+ if v >= 32000 || v <= -32000 {
+ break
+ }
+ goto lit
+
+ case gc.ODOT,
+ gc.ODOTPTR:
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ goto odot
+
+	// disabled: the OINDEX case is now covered by agenr, which has a more
+	// suitable register allocation pattern, so return early; the code
+	// below is intentionally unreachable.
+	case gc.OINDEX:
+		return 0
+
+ if n.Left.Type.Etype == gc.TSTRING {
+ return 0
+ }
+ goto oindex
+ }
+
+ return 0
+
+lit:
+ switch as {
+ default:
+ return 0
+
+ case x86.AADDB,
+ x86.AADDW,
+ x86.AADDL,
+ x86.AADDQ,
+ x86.ASUBB,
+ x86.ASUBW,
+ x86.ASUBL,
+ x86.ASUBQ,
+ x86.AANDB,
+ x86.AANDW,
+ x86.AANDL,
+ x86.AANDQ,
+ x86.AORB,
+ x86.AORW,
+ x86.AORL,
+ x86.AORQ,
+ x86.AXORB,
+ x86.AXORW,
+ x86.AXORL,
+ x86.AXORQ,
+ x86.AINCB,
+ x86.AINCW,
+ x86.AINCL,
+ x86.AINCQ,
+ x86.ADECB,
+ x86.ADECW,
+ x86.ADECL,
+ x86.ADECQ,
+ x86.AMOVB,
+ x86.AMOVW,
+ x86.AMOVL,
+ x86.AMOVQ:
+ break
+ }
+
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ gc.Naddr(n, a, 1)
+ goto yes
+
+odot:
+ o = gc.Dotoffset(n, oary[:], &nn)
+ if nn == nil {
+ goto no
+ }
+
+ if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+ // directly addressable set of DOTs
+ n1 = *nn
+
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ gc.Naddr(&n1, a, 1)
+ goto yes
+ }
+
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ if oary[0] >= 0 {
+ agen(nn, reg)
+ n1.Xoffset = oary[0]
+ } else {
+ cgen(nn, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[0] + 1)
+ }
+
+ for i = 1; i < o; i++ {
+ if oary[i] >= 0 {
+ gc.Fatal("can't happen")
+ }
+ gins(movptr, &n1, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[i] + 1)
+ }
+
+ a.Type = obj.TYPE_NONE
+	a.Index = x86.REG_NONE
+ fixlargeoffset(&n1)
+ gc.Naddr(&n1, a, 1)
+ goto yes
+
+oindex:
+ l = n.Left
+ r = n.Right
+ if l.Ullman >= gc.UINF && r.Ullman >= gc.UINF {
+ return 0
+ }
+
+	// set o to the kind of array: ODynam marks a slice (open array)
+ o = 0
+
+ if gc.Isptr[l.Type.Etype] != 0 {
+ gc.Fatal("ptr ary")
+ }
+ if l.Type.Etype != gc.TARRAY {
+ gc.Fatal("not ary")
+ }
+ if l.Type.Bound < 0 {
+ o |= ODynam
+ }
+
+ w = n.Type.Width
+ if gc.Isconst(r, gc.CTINT) != 0 {
+ goto oindex_const
+ }
+
+ switch w {
+ default:
+ return 0
+
+ case 1,
+ 2,
+ 4,
+ 8:
+ break
+ }
+
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+
+ // load the array (reg)
+ if l.Ullman > r.Ullman {
+ if xgen(l, reg, o) != 0 {
+ o |= OAddable
+ }
+ }
+
+ // load the index (reg1)
+ t = gc.Types[gc.TUINT64]
+
+ if gc.Issigned[r.Type.Etype] != 0 {
+ t = gc.Types[gc.TINT64]
+ }
+ regalloc(reg1, t, nil)
+ regalloc(&n3, r.Type, reg1)
+ cgen(r, &n3)
+ gmove(&n3, reg1)
+ regfree(&n3)
+
+ // load the array (reg)
+ if l.Ullman <= r.Ullman {
+ if xgen(l, reg, o) != 0 {
+ o |= OAddable
+ }
+ }
+
+ // check bounds
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ n4.Op = gc.OXXX
+
+ t = gc.Types[gc.Simtype[gc.TUINT]]
+ if o&ODynam != 0 {
+ if o&OAddable != 0 {
+ n2 = *l
+ n2.Xoffset += int64(gc.Array_nel)
+ n2.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ } else {
+ n2 = *reg
+ n2.Xoffset = int64(gc.Array_nel)
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ }
+ } else {
+ if gc.Is64(r.Type) != 0 {
+ t = gc.Types[gc.TUINT64]
+ }
+ gc.Nodconst(&n2, gc.Types[gc.TUINT64], l.Type.Bound)
+ }
+
+ gins(optoas(gc.OCMP, t), reg1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
+ if n4.Op != gc.OXXX {
+ regfree(&n4)
+ }
+ ginscall(gc.Panicindex, -1)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if o&ODynam != 0 {
+ if o&OAddable != 0 {
+ n2 = *l
+ n2.Xoffset += int64(gc.Array_array)
+ n2.Type = gc.Types[gc.Tptr]
+ gmove(&n2, reg)
+ } else {
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Xoffset = int64(gc.Array_array)
+ n2.Type = gc.Types[gc.Tptr]
+ gmove(&n2, reg)
+ }
+ }
+
+	// The addressable and non-addressable cases build the same
+	// scaled-index operand, (reg)(reg1*w).
+	gc.Naddr(reg1, a, 1)
+	a.Offset = 0
+	a.Scale = int8(w)
+	a.Index = a.Reg
+	a.Type = obj.TYPE_MEM
+	a.Reg = reg.Val.U.Reg
+
+ goto yes
+
+ // index is constant
+ // can check statically and
+ // can multiply by width statically
+
+oindex_const:
+ v = gc.Mpgetfix(r.Val.U.Xval)
+
+ if sudoaddable(as, l, a) != 0 {
+ goto oindex_const_sudo
+ }
+
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+
+ if o&ODynam != 0 {
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ agen(l, reg)
+
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_nel)
+ gc.Nodconst(&n2, gc.Types[gc.TUINT64], v)
+ gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
+ ginscall(gc.Panicindex, -1)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_array)
+ gmove(&n1, reg)
+
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Xoffset = v * w
+ fixlargeoffset(&n2)
+ a.Type = obj.TYPE_NONE
+		a.Index = x86.REG_NONE
+ gc.Naddr(&n2, a, 1)
+ goto yes
+ }
+
+ igen(l, &n1, nil)
+ if n1.Op == gc.OINDREG {
+ *reg = n1
+ reg.Op = gc.OREGISTER
+ }
+
+ n1.Xoffset += v * w
+ fixlargeoffset(&n1)
+ a.Type = obj.TYPE_NONE
+	a.Index = x86.REG_NONE
+ gc.Naddr(&n1, a, 1)
+ goto yes
+
+oindex_const_sudo:
+ if o&ODynam == 0 {
+ // array indexed by a constant
+ a.Offset += v * w
+
+ goto yes
+ }
+
+ // slice indexed by a constant
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ a.Offset += int64(gc.Array_nel)
+ gc.Nodconst(&n2, gc.Types[gc.TUINT64], v)
+ p1 = gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), nil, &n2)
+ p1.From = *a
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
+ ginscall(gc.Panicindex, -1)
+ gc.Patch(p1, gc.Pc)
+ a.Offset -= int64(gc.Array_nel)
+ }
+
+ a.Offset += int64(gc.Array_array)
+ reg = &clean[cleani-1]
+ if reg.Op == gc.OEMPTY {
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ }
+
+ p1 = gins(movptr, nil, reg)
+ p1.From = *a
+
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Xoffset = v * w
+ fixlargeoffset(&n2)
+ a.Type = obj.TYPE_NONE
+	a.Index = x86.REG_NONE
+ gc.Naddr(&n2, a, 1)
+ goto yes
+
+yes:
+ return 1
+
+no:
+ sudoclean()
+ return 0
+}
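+
+// ginsOrSudo is an illustrative sketch, not part of the original conversion,
+// of the caller pattern described in the comment on sudoaddable: try to fold
+// the address of t into the instruction, falling back to an ordinary gins.
+// The name ginsOrSudo is invented for this sketch.
+func ginsOrSudo(as int, f *gc.Node, t *gc.Node) {
+	var a obj.Addr
+	if sudoaddable(as, t, &a) != 0 {
+		p := gins(as, f, nil) // emit with a placeholder destination
+		p.To = a              // then patch in the folded address
+		sudoclean()           // release the register(s) held for a
+		return
+	}
+	gins(as, f, t)
+}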
diff --git a/src/cmd/new6g/peep.go b/src/cmd/new6g/peep.go
new file mode 100644
index 0000000000..7b262d6896
--- /dev/null
+++ b/src/cmd/new6g/peep.go
@@ -0,0 +1,1077 @@
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+var gactive uint32
+
+const (
+ exregoffset = x86.REG_R15
+)
+
+// needc reports (as 0 or 1) whether the carry bit is live at p: that is,
+// whether some instruction at or after p uses the carry flag before it is
+// next set or killed.
+func needc(p *obj.Prog) int {
+ var info gc.ProgInfo
+
+ for p != nil {
+ proginfo(&info, p)
+ if info.Flags&gc.UseCarry != 0 {
+ return 1
+ }
+ if info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
+ return 0
+ }
+ p = p.Link
+ }
+
+ return 0
+}
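+
+// carrySafeInc is an illustrative helper, not part of the original
+// conversion: it spells out why the ADD/SUB peepholes below consult needc.
+// INC and DEC do not write the carry flag, so rewriting ADD $1 or SUB $1
+// is only legal when no later instruction consumes the carry the original
+// instruction would have produced.
+func carrySafeInc(p *obj.Prog) bool {
+	return p.From.Type == obj.TYPE_CONST && p.From.Offset == 1 && !(needc(p.Link) != 0)
+}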
+
+func rnops(r *gc.Flow) *gc.Flow {
+ var p *obj.Prog
+ var r1 *gc.Flow
+
+ if r != nil {
+ for {
+ p = r.Prog
+ if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
+ break
+ }
+ r1 = gc.Uniqs(r)
+ if r1 == nil {
+ break
+ }
+ r = r1
+ }
+ }
+
+ return r
+}
+
+func peep(firstp *obj.Prog) {
+ var r *gc.Flow
+ var r1 *gc.Flow
+ var g *gc.Graph
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var t int
+
+ g = gc.Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+ gactive = 0
+
+ // byte, word arithmetic elimination.
+ elimshortmov(g)
+
+ // constant propagation
+ // find MOV $con,R followed by
+ // another MOV $con,R without
+ // setting R in the interim
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ case x86.ALEAL,
+ x86.ALEAQ:
+			if regtyp(&p.To) != 0 && p.From.Sym != nil && p.From.Index == x86.REG_NONE {
+				conprop(r)
+			}
+
+ case x86.AMOVB,
+ x86.AMOVW,
+ x86.AMOVL,
+ x86.AMOVQ,
+ x86.AMOVSS,
+ x86.AMOVSD:
+			if regtyp(&p.To) != 0 && (p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST) {
+				conprop(r)
+			}
+ }
+ }
+
+loop1:
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("loop1", g.Start, 0)
+ }
+
+ t = 0
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ case x86.AMOVL,
+ x86.AMOVQ,
+ x86.AMOVSS,
+ x86.AMOVSD:
+ if regtyp(&p.To) != 0 {
+ if regtyp(&p.From) != 0 {
+ if copyprop(g, r) != 0 {
+ excise(r)
+ t++
+ } else if subprop(r) != 0 && copyprop(g, r) != 0 {
+ excise(r)
+ t++
+ }
+ }
+ }
+
+ case x86.AMOVBLZX,
+ x86.AMOVWLZX,
+ x86.AMOVBLSX,
+ x86.AMOVWLSX:
+ if regtyp(&p.To) != 0 {
+ r1 = rnops(gc.Uniqs(r))
+ if r1 != nil {
+ p1 = r1.Prog
+ if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
+ p1.As = x86.AMOVL
+ t++
+ }
+ }
+ }
+
+ case x86.AMOVBQSX,
+ x86.AMOVBQZX,
+ x86.AMOVWQSX,
+ x86.AMOVWQZX,
+ x86.AMOVLQSX,
+ x86.AMOVLQZX,
+ x86.AMOVQL:
+ if regtyp(&p.To) != 0 {
+ r1 = rnops(gc.Uniqs(r))
+ if r1 != nil {
+ p1 = r1.Prog
+ if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
+ p1.As = x86.AMOVQ
+ t++
+ }
+ }
+ }
+
+ case x86.AADDL,
+ x86.AADDQ,
+ x86.AADDW:
+ if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+ break
+ }
+ if p.From.Offset == -1 {
+ if p.As == x86.AADDQ {
+ p.As = x86.ADECQ
+ } else if p.As == x86.AADDL {
+ p.As = x86.ADECL
+ } else {
+ p.As = x86.ADECW
+ }
+ p.From = obj.Zprog.From
+ break
+ }
+
+ if p.From.Offset == 1 {
+ if p.As == x86.AADDQ {
+ p.As = x86.AINCQ
+ } else if p.As == x86.AADDL {
+ p.As = x86.AINCL
+ } else {
+ p.As = x86.AINCW
+ }
+ p.From = obj.Zprog.From
+ break
+ }
+
+ case x86.ASUBL,
+ x86.ASUBQ,
+ x86.ASUBW:
+ if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+ break
+ }
+ if p.From.Offset == -1 {
+ if p.As == x86.ASUBQ {
+ p.As = x86.AINCQ
+ } else if p.As == x86.ASUBL {
+ p.As = x86.AINCL
+ } else {
+ p.As = x86.AINCW
+ }
+ p.From = obj.Zprog.From
+ break
+ }
+
+ if p.From.Offset == 1 {
+ if p.As == x86.ASUBQ {
+ p.As = x86.ADECQ
+ } else if p.As == x86.ASUBL {
+ p.As = x86.ADECL
+ } else {
+ p.As = x86.ADECW
+ }
+ p.From = obj.Zprog.From
+ break
+ }
+ }
+ }
+
+ if t != 0 {
+ goto loop1
+ }
+
+ // MOVLQZX removal.
+ // The MOVLQZX exists to avoid being confused for a
+ // MOVL that is just copying 32-bit data around during
+	// copyprop. Now that copyprop is done, remove MOVLQZX R1, R2
+ // if it is dominated by an earlier ADDL/MOVL/etc into R1 that
+ // will have already cleared the high bits.
+ //
+ // MOVSD removal.
+ // We never use packed registers, so a MOVSD between registers
+ // can be replaced by MOVAPD, which moves the pair of float64s
+ // instead of just the lower one. We only use the lower one, but
+ // the processor can do better if we do moves using both.
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ if p.As == x86.AMOVLQZX {
+ if regtyp(&p.From) != 0 {
+ if p.From.Type == p.To.Type && p.From.Reg == p.To.Reg {
+ if prevl(r, int(p.From.Reg)) != 0 {
+ excise(r)
+ }
+ }
+ }
+ }
+
+ if p.As == x86.AMOVSD {
+ if regtyp(&p.From) != 0 {
+ if regtyp(&p.To) != 0 {
+ p.As = x86.AMOVAPD
+ }
+ }
+ }
+ }
+
+ // load pipelining
+ // push any load from memory as early as possible
+ // to give it time to complete before use.
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ case x86.AMOVB,
+ x86.AMOVW,
+ x86.AMOVL,
+ x86.AMOVQ,
+ x86.AMOVLQZX:
+ if regtyp(&p.To) != 0 && !(regconsttyp(&p.From) != 0) {
+ pushback(r)
+ }
+ }
+ }
+
+ gc.Flowend(g)
+}
+
+func pushback(r0 *gc.Flow) {
+ var r *gc.Flow
+ var b *gc.Flow
+ var p0 *obj.Prog
+ var p *obj.Prog
+ var t obj.Prog
+
+ b = nil
+ p0 = r0.Prog
+ for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
+ p = r.Prog
+ if p.As != obj.ANOP {
+ if !(regconsttyp(&p.From) != 0) || !(regtyp(&p.To) != 0) {
+ break
+ }
+ if copyu(p, &p0.To, nil) != 0 || copyu(p0, &p.To, nil) != 0 {
+ break
+ }
+ }
+
+ if p.As == obj.ACALL {
+ break
+ }
+ b = r
+ }
+
+ if b == nil {
+ if gc.Debug['v'] != 0 {
+ fmt.Printf("no pushback: %v\n", r0.Prog)
+ if r != nil {
+			fmt.Printf("\t%v [%d]\n", r.Prog, bool2int(gc.Uniqs(r) != nil))
+ }
+ }
+
+ return
+ }
+
+ if gc.Debug['v'] != 0 {
+ fmt.Printf("pushback\n")
+ for r = b; ; r = r.Link {
+ fmt.Printf("\t%v\n", r.Prog)
+ if r == r0 {
+ break
+ }
+ }
+ }
+
+ t = *r0.Prog
+ for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
+ p0 = r.Link.Prog
+ p = r.Prog
+ p0.As = p.As
+ p0.Lineno = p.Lineno
+ p0.From = p.From
+ p0.To = p.To
+
+ if r == b {
+ break
+ }
+ }
+
+ p0 = r.Prog
+ p0.As = t.As
+ p0.Lineno = t.Lineno
+ p0.From = t.From
+ p0.To = t.To
+
+ if gc.Debug['v'] != 0 {
+ fmt.Printf("\tafter\n")
+ for r = b; ; r = r.Link {
+ fmt.Printf("\t%v\n", r.Prog)
+ if r == r0 {
+ break
+ }
+ }
+ }
+}
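+
+// An illustrative before/after of the motion pushback performs, not part of
+// the original conversion. The load is bubbled upward past independent
+// instructions so it issues earlier:
+//
+//	ADDQ BX, CX             MOVQ x+0(FP), AX
+//	MOVQ x+0(FP), AX   =>   ADDQ BX, CX
+//
+// The walk stops at any instruction that reads or writes one of the load's
+// operands, or at a CALL.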
+
+func excise(r *gc.Flow) {
+ var p *obj.Prog
+
+ p = r.Prog
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("%v ===delete===\n", p)
+ }
+
+ obj.Nopout(p)
+
+ gc.Ostats.Ndelmov++
+}
+
+func regtyp(a *obj.Addr) int {
+ return bool2int(a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_R15 || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X15))
+}
+
+// movb elimination.
+// movb is simulated by the linker
+// when a register other than ax, bx, cx, dx
+// is used, so rewrite to other instructions
+// when possible. a movb into a register
+// can smash the entire 32-bit register without
+// causing any trouble.
+//
+// TODO: Using the Q forms here instead of the L forms
+// seems unnecessary, and it makes the instructions longer.
+func elimshortmov(g *gc.Graph) {
+ var p *obj.Prog
+ var r *gc.Flow
+
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ if regtyp(&p.To) != 0 {
+ switch p.As {
+ case x86.AINCB,
+ x86.AINCW:
+ p.As = x86.AINCQ
+
+ case x86.ADECB,
+ x86.ADECW:
+ p.As = x86.ADECQ
+
+ case x86.ANEGB,
+ x86.ANEGW:
+ p.As = x86.ANEGQ
+
+ case x86.ANOTB,
+ x86.ANOTW:
+ p.As = x86.ANOTQ
+ }
+
+ if regtyp(&p.From) != 0 || p.From.Type == obj.TYPE_CONST {
+				// A move or arithmetic op into a partial register
+				// from another register or constant can be widened
+				// to the full register.
+ // we don't switch to 64-bit arithmetic if it can
+ // change how the carry bit is set (and the carry bit is needed).
+ switch p.As {
+ case x86.AMOVB,
+ x86.AMOVW:
+ p.As = x86.AMOVQ
+
+ case x86.AADDB,
+ x86.AADDW:
+ if !(needc(p.Link) != 0) {
+ p.As = x86.AADDQ
+ }
+
+ case x86.ASUBB,
+ x86.ASUBW:
+ if !(needc(p.Link) != 0) {
+ p.As = x86.ASUBQ
+ }
+
+ case x86.AMULB,
+ x86.AMULW:
+ p.As = x86.AMULQ
+
+ case x86.AIMULB,
+ x86.AIMULW:
+ p.As = x86.AIMULQ
+
+ case x86.AANDB,
+ x86.AANDW:
+ p.As = x86.AANDQ
+
+ case x86.AORB,
+ x86.AORW:
+ p.As = x86.AORQ
+
+ case x86.AXORB,
+ x86.AXORW:
+ p.As = x86.AXORQ
+
+ case x86.ASHLB,
+ x86.ASHLW:
+ p.As = x86.ASHLQ
+ }
+ } else if p.From.Type != obj.TYPE_REG {
+ // explicit zero extension, but don't
+ // do that if source is a byte register
+ // (only AH can occur and it's forbidden).
+ switch p.As {
+ case x86.AMOVB:
+ p.As = x86.AMOVBQZX
+
+ case x86.AMOVW:
+ p.As = x86.AMOVWQZX
+ }
+ }
+ }
+ }
+}
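+
+// An illustrative before/after for elimshortmov, not part of the original
+// conversion. A byte move of a constant or register may clobber the whole
+// register:
+//
+//	MOVB $7, DX        =>  MOVQ $7, DX
+//
+// while a byte load from memory becomes an explicit zero extension:
+//
+//	MOVB x+0(FP), DX   =>  MOVBQZX x+0(FP), DX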
+
+// is 'a' a register or constant?
+func regconsttyp(a *obj.Addr) int {
+ if regtyp(a) != 0 {
+ return 1
+ }
+ switch a.Type {
+ case obj.TYPE_CONST,
+ obj.TYPE_FCONST,
+ obj.TYPE_SCONST,
+ obj.TYPE_ADDR: // TODO(rsc): Not all TYPE_ADDRs are constants.
+ return 1
+ }
+
+ return 0
+}
+
+// is reg guaranteed to be truncated by a previous L instruction?
+func prevl(r0 *gc.Flow, reg int) int {
+ var p *obj.Prog
+ var r *gc.Flow
+ var info gc.ProgInfo
+
+ for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ p = r.Prog
+ if p.To.Type == obj.TYPE_REG && int(p.To.Reg) == reg {
+ proginfo(&info, p)
+ if info.Flags&gc.RightWrite != 0 {
+ if info.Flags&gc.SizeL != 0 {
+ return 1
+ }
+ return 0
+ }
+ }
+ }
+
+ return 0
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ * MOV a, R0
+ * ADD b, R0 / no use of R1
+ * MOV R0, R1
+ * would be converted to
+ * MOV a, R1
+ * ADD b, R1
+ * MOV R1, R0
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ */
+func subprop(r0 *gc.Flow) int {
+ var p *obj.Prog
+ var info gc.ProgInfo
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+ var r *gc.Flow
+ var t int
+
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("subprop %v\n", r0.Prog)
+ }
+ p = r0.Prog
+ v1 = &p.From
+ if !(regtyp(v1) != 0) {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
+ }
+ return 0
+ }
+
+ v2 = &p.To
+ if !(regtyp(v2) != 0) {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
+ }
+ return 0
+ }
+
+ for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\t? %v\n", r.Prog)
+ }
+ if gc.Uniqs(r) == nil {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tno unique successor\n")
+ }
+ break
+ }
+
+ p = r.Prog
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+ proginfo(&info, p)
+ if info.Flags&gc.Call != 0 {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tfound %v; return 0\n", p)
+ }
+ return 0
+ }
+
+ if info.Reguse|info.Regset != 0 {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tfound %v; return 0\n", p)
+ }
+ return 0
+ }
+
+ if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
+ goto gotit
+ }
+
+ if copyau(&p.From, v2) != 0 || copyau(&p.To, v2) != 0 {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tcopyau %v failed\n", gc.Ctxt.Dconv(v2))
+ }
+ break
+ }
+
+ if copysub(&p.From, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tcopysub failed\n")
+ }
+ break
+ }
+ }
+
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tran off end; return 0\n")
+ }
+ return 0
+
+gotit:
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t = int(v1.Reg)
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return 1
+}
+
+/*
+ * The idea is to remove redundant copies.
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * use v2 return fail
+ * -----------------
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * set v2 return success
+ */
+func copyprop(g *gc.Graph, r0 *gc.Flow) int {
+ var p *obj.Prog
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("copyprop %v\n", r0.Prog)
+ }
+ p = r0.Prog
+ v1 = &p.From
+ v2 = &p.To
+ if copyas(v1, v2) != 0 {
+ return 1
+ }
+ gactive++
+ return copy1(v1, v2, r0.S1, 0)
+}
+
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+ var t int
+ var p *obj.Prog
+
+ if uint32(r.Active) == gactive {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("act set; return 1\n")
+ }
+ return 1
+ }
+
+ r.Active = int32(gactive)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
+ }
+ for ; r != nil; r = r.S1 {
+ p = r.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ if !(f != 0) && gc.Uniqp(r) == nil {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; merge; f=%d", f)
+ }
+ }
+
+ t = copyu(p, v2, nil)
+ switch t {
+ case 2: /* rar, can't split */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+ }
+ return 0
+
+ case 3: /* set */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return 1
+
+ case 1, /* used, substitute */
+ 4: /* use and set */
+ if f != 0 {
+ if !(gc.Debug['P'] != 0) {
+ return 0
+ }
+ if t == 4 {
+ fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ } else {
+ fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ }
+ return 0
+ }
+
+ if copyu(p, v2, v1) != 0 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub fail; return 0\n")
+ }
+ return 0
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub %v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
+ }
+ if t == 4 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return 1
+ }
+ }
+
+ if !(f != 0) {
+ t = copyu(p, v1, nil)
+ if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+ }
+ }
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n")
+ }
+ if r.S2 != nil {
+ if !(copy1(v1, v2, r.S2, f) != 0) {
+ return 0
+ }
+ }
+ }
+
+ return 1
+}
+
+/*
+ * return
+ * 1 if v only used (and substitute),
+ * 2 if read-alter-rewrite
+ * 3 if set
+ * 4 if set and used
+ * 0 otherwise (not touched)
+ */
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+ var info gc.ProgInfo
+
+ switch p.As {
+ case obj.AJMP:
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) != 0 {
+ return 1
+ }
+ return 0
+
+ case obj.ARET:
+ if s != nil {
+ return 1
+ }
+ return 3
+
+ case obj.ACALL:
+		if x86.REGEXT != 0 && v.Type == obj.TYPE_REG && v.Reg <= x86.REGEXT && v.Reg > exregoffset {
+ return 2
+ }
+ if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
+ return 2
+ }
+ if v.Type == p.From.Type && v.Reg == p.From.Reg {
+ return 2
+ }
+
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) != 0 {
+ return 4
+ }
+ return 3
+
+ case obj.ATEXT:
+ if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
+ return 3
+ }
+ return 0
+ }
+
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ return 0
+ }
+ proginfo(&info, p)
+
+ if (info.Reguse|info.Regset)&RtoB(int(v.Reg)) != 0 {
+ return 2
+ }
+
+ if info.Flags&gc.LeftAddr != 0 {
+ if copyas(&p.From, v) != 0 {
+ return 2
+ }
+ }
+
+ if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
+ if copyas(&p.To, v) != 0 {
+ return 2
+ }
+ }
+
+ if info.Flags&gc.RightWrite != 0 {
+ if copyas(&p.To, v) != 0 {
+ if s != nil {
+ return copysub(&p.From, v, s, 1)
+ }
+ if copyau(&p.From, v) != 0 {
+ return 4
+ }
+ return 3
+ }
+ }
+
+ if info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ return copysub(&p.To, v, s, 1)
+ }
+
+ if copyau(&p.From, v) != 0 {
+ return 1
+ }
+ if copyau(&p.To, v) != 0 {
+ return 1
+ }
+ }
+
+ return 0
+}
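+
+// copyuName is an illustrative debugging aid, not part of the original
+// conversion, naming the copyu return codes documented above.
+func copyuName(t int) string {
+	switch t {
+	case 1:
+		return "used (and substituted)"
+	case 2:
+		return "read-alter-rewrite"
+	case 3:
+		return "set"
+	case 4:
+		return "set and used"
+	}
+	return "not touched"
+}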
+
+/*
+ * direct reference,
+ * could be set/use depending on
+ * semantics
+ */
+func copyas(a *obj.Addr, v *obj.Addr) int {
+ if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_R15B {
+ gc.Fatal("use of byte register")
+ }
+ if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_R15B {
+ gc.Fatal("use of byte register")
+ }
+
+ if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+ return 0
+ }
+ if regtyp(v) != 0 {
+ return 1
+ }
+ if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+ if v.Offset == a.Offset {
+ return 1
+ }
+ }
+ return 0
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) int {
+ if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+ return 0
+ }
+ if regtyp(v) != 0 {
+ return 1
+ }
+ if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+ if v.Offset == a.Offset {
+ return 1
+ }
+ }
+ return 0
+}
+
+/*
+ * either direct or indirect
+ */
+func copyau(a *obj.Addr, v *obj.Addr) int {
+ if copyas(a, v) != 0 {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tcopyau: copyas returned 1\n")
+ }
+ return 1
+ }
+
+ if regtyp(v) != 0 {
+ if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tcopyau: found indir use - return 1\n")
+ }
+ return 1
+ }
+
+ if a.Index == v.Reg {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tcopyau: found index use - return 1\n")
+ }
+ return 1
+ }
+ }
+
+ return 0
+}
+
+/*
+ * substitute s for v in a
+ * return failure to substitute
+ */
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+ var reg int
+
+ if copyas(a, v) != 0 {
+ reg = int(s.Reg)
+ if reg >= x86.REG_AX && reg <= x86.REG_R15 || reg >= x86.REG_X0 && reg <= x86.REG_X0+15 {
+ if f != 0 {
+ a.Reg = int16(reg)
+ }
+ }
+
+ return 0
+ }
+
+ if regtyp(v) != 0 {
+ reg = int(v.Reg)
+ if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
+ if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
+ return 1 /* can't use BP-base with index */
+ }
+ if f != 0 {
+ a.Reg = s.Reg
+ }
+ }
+
+ if int(a.Index) == reg {
+ if f != 0 {
+ a.Index = s.Reg
+ }
+ return 0
+ }
+
+ return 0
+ }
+
+ return 0
+}
+
+func conprop(r0 *gc.Flow) {
+ var r *gc.Flow
+ var p *obj.Prog
+ var p0 *obj.Prog
+ var t int
+ var v0 *obj.Addr
+
+ p0 = r0.Prog
+ v0 = &p0.To
+ r = r0
+
+loop:
+ r = gc.Uniqs(r)
+ if r == nil || r == r0 {
+ return
+ }
+ if gc.Uniqp(r) == nil {
+ return
+ }
+
+ p = r.Prog
+ t = copyu(p, v0, nil)
+ switch t {
+ case 0, // miss
+ 1: // use
+ goto loop
+
+ case 2, // rar
+ 4: // use and set
+ break
+
+	case 3: // set
+		// A second write of the same constant to v0: the later
+		// instruction is redundant and can be excised. For
+		// floating-point constants the value (Dval) must also match.
+		if p.As == p0.As &&
+			p.From.Type == p0.From.Type &&
+			p.From.Reg == p0.From.Reg &&
+			p.From.Node == p0.From.Node &&
+			p.From.Offset == p0.From.Offset &&
+			p.From.Scale == p0.From.Scale &&
+			(p.From.Type != obj.TYPE_FCONST || p.From.U.Dval == p0.From.U.Dval) &&
+			p.From.Index == p0.From.Index {
+			excise(r)
+			goto loop
+		}
+ }
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) int {
+ return bool2int(regtyp(reg) != 0 && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096)
+}
+
+func stackaddr(a *obj.Addr) int {
+ return bool2int(a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP)
+}
diff --git a/src/cmd/new6g/prog.go b/src/cmd/new6g/prog.go
new file mode 100644
index 0000000000..3f4c19567c
--- /dev/null
+++ b/src/cmd/new6g/prog.go
@@ -0,0 +1,272 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+import "cmd/internal/gc"
+
+var (
+ AX = RtoB(x86.REG_AX)
+ BX = RtoB(x86.REG_BX)
+ CX = RtoB(x86.REG_CX)
+ DX = RtoB(x86.REG_DX)
+ DI = RtoB(x86.REG_DI)
+ SI = RtoB(x86.REG_SI)
+ LeftRdwr uint32 = gc.LeftRead | gc.LeftWrite
+ RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about instructions
+// generated by the compiler and processed in the optimizer.
+// See package gc for the flag bit definitions (formerly opt.h).
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [x86.ALAST]gc.ProgInfo{
+ obj.ATYPE: gc.ProgInfo{gc.Pseudo | gc.Skip, 0, 0, 0},
+ obj.ATEXT: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AFUNCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.APCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AUNDEF: gc.ProgInfo{gc.Break, 0, 0, 0},
+ obj.AUSEFIELD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ obj.ACHECKNIL: gc.ProgInfo{gc.LeftRead, 0, 0, 0},
+ obj.AVARDEF: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+ obj.AVARKILL: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+ // NOP is an internal no-op that also stands
+ // for USED and SET annotations, not the Intel opcode.
+ obj.ANOP: gc.ProgInfo{gc.LeftRead | gc.RightWrite, 0, 0, 0},
+ x86.AADCL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.AADCQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.AADCW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.AADDB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AADDL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AADDW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AADDQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AADDSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.AADDSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.AANDB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AANDL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AANDQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AANDW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ obj.ACALL: gc.ProgInfo{gc.RightAddr | gc.Call | gc.KillCarry, 0, 0, 0},
+ x86.ACDQ: gc.ProgInfo{gc.OK, AX, AX | DX, 0},
+ x86.ACQO: gc.ProgInfo{gc.OK, AX, AX | DX, 0},
+ x86.ACWD: gc.ProgInfo{gc.OK, AX, AX | DX, 0},
+ x86.ACLD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ x86.ASTD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ x86.ACMPB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACMPL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACMPQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACMPW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACOMISD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACOMISS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACVTSD2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSD2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSD2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSL2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSL2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSQ2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSQ2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSS2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSS2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSS2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTTSD2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTTSD2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTTSS2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTTSS2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ADECB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ x86.ADECL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ x86.ADECQ: gc.ProgInfo{gc.SizeQ | RightRdwr, 0, 0, 0},
+ x86.ADECW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ x86.ADIVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ x86.ADIVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.ADIVQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.ADIVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.ADIVSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.ADIVSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.AIDIVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ x86.AIDIVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.AIDIVQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.AIDIVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.AIMULB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ x86.AIMULL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+ x86.AIMULQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+ x86.AIMULW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+ x86.AINCB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ x86.AINCL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ x86.AINCQ: gc.ProgInfo{gc.SizeQ | RightRdwr, 0, 0, 0},
+ x86.AINCW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ x86.AJCC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJCS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJEQ: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJGE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJGT: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJHI: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJLE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJLS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJLT: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJMI: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJNE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJOC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJOS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJPC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJPL: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJPS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ obj.AJMP: gc.ProgInfo{gc.Jump | gc.Break | gc.KillCarry, 0, 0, 0},
+ x86.ALEAL: gc.ProgInfo{gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ x86.ALEAQ: gc.ProgInfo{gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ x86.AMOVBLSX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVBLZX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVBQSX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVBQZX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVBWSX: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVBWZX: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVLQSX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVLQZX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVWLSX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVWLZX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVWQSX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVWQZX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVQL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMOVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMOVQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMOVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMOVSB: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ x86.AMOVSL: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ x86.AMOVSQ: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ x86.AMOVSW: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ obj.ADUFFCOPY: gc.ProgInfo{gc.OK, DI | SI, DI | SI | CX, 0},
+ x86.AMOVSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMOVSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+ // We use MOVAPD as a faster synonym for MOVSD.
+ x86.AMOVAPD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMULB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ x86.AMULL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+ x86.AMULQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+ x86.AMULW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+ x86.AMULSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.AMULSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.ANEGB: gc.ProgInfo{gc.SizeB | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ANEGL: gc.ProgInfo{gc.SizeL | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ANEGQ: gc.ProgInfo{gc.SizeQ | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ANEGW: gc.ProgInfo{gc.SizeW | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ANOTB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ x86.ANOTL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ x86.ANOTQ: gc.ProgInfo{gc.SizeQ | RightRdwr, 0, 0, 0},
+ x86.ANOTW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ x86.AORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AORQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.APOPQ: gc.ProgInfo{gc.SizeQ | gc.RightWrite, 0, 0, 0},
+ x86.APUSHQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead, 0, 0, 0},
+ x86.ARCLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCLQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCRB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCRL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCRQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCRW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.AREP: gc.ProgInfo{gc.OK, CX, CX, 0},
+ x86.AREPN: gc.ProgInfo{gc.OK, CX, CX, 0},
+ obj.ARET: gc.ProgInfo{gc.Break | gc.KillCarry, 0, 0, 0},
+ x86.AROLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.AROLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.AROLQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.AROLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ARORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ARORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ARORQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ARORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASALB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASALL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASALQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASALW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASARB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASARL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASARQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASARW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASBBB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ASBBL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ASBBQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ASBBW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ASHLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHLQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHRB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHRL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHRQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHRW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASTOSB: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ x86.ASTOSL: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ x86.ASTOSQ: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ x86.ASTOSW: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ obj.ADUFFZERO: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ x86.ASUBB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ASUBL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ASUBQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ASUBW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ASUBSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.ASUBSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.ATESTB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ATESTL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ATESTQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ATESTW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.AUCOMISD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ x86.AUCOMISS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ x86.AXCHGB: gc.ProgInfo{gc.SizeB | LeftRdwr | RightRdwr, 0, 0, 0},
+ x86.AXCHGL: gc.ProgInfo{gc.SizeL | LeftRdwr | RightRdwr, 0, 0, 0},
+ x86.AXCHGQ: gc.ProgInfo{gc.SizeQ | LeftRdwr | RightRdwr, 0, 0, 0},
+ x86.AXCHGW: gc.ProgInfo{gc.SizeW | LeftRdwr | RightRdwr, 0, 0, 0},
+ x86.AXORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AXORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AXORQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AXORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+}
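+
+// Reading the literals above, based on how proginfo below updates them:
+// the gc.ProgInfo fields are, in order, the Flags bit set, the fixed
+// registers read (Reguse), the fixed registers written (Regset), and
+// Regindex, which proginfo fills in for addressing registers. For example,
+// the ACDQ entry records that CDQ reads AX and writes AX and DX.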
+
+func proginfo(info *gc.ProgInfo, p *obj.Prog) {
+ *info = progtable[p.As]
+ if info.Flags == 0 {
+ gc.Fatal("unknown instruction %v", p)
+ }
+
+ if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
+ info.Reguse |= CX
+ }
+
+ if info.Flags&gc.ImulAXDX != 0 {
+ if p.To.Type == obj.TYPE_NONE {
+ info.Reguse |= AX
+ info.Regset |= AX | DX
+ } else {
+ info.Flags |= RightRdwr
+ }
+ }
+
+ // Addressing makes some registers used.
+ if p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_NONE {
+ info.Regindex |= RtoB(int(p.From.Reg))
+ }
+ if p.From.Index != x86.REG_NONE {
+ info.Regindex |= RtoB(int(p.From.Index))
+ }
+ if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE {
+ info.Regindex |= RtoB(int(p.To.Reg))
+ }
+ if p.To.Index != x86.REG_NONE {
+ info.Regindex |= RtoB(int(p.To.Index))
+ }
+}
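+
+// exampleShiftInfo is an illustrative sketch, not part of the original
+// conversion: for a variable-count shift such as SHLQ CX, AX, the static
+// table entry carries ShiftCX and proginfo above then adds CX to the use
+// mask, since the count register is read at run time.
+func exampleShiftInfo(p *obj.Prog) uint64 {
+	var info gc.ProgInfo
+	proginfo(&info, p)
+	return info.Reguse
+}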
diff --git a/src/cmd/new6g/reg.go b/src/cmd/new6g/reg.go
new file mode 100644
index 0000000000..0629a6248d
--- /dev/null
+++ b/src/cmd/new6g/reg.go
@@ -0,0 +1,144 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+import "cmd/internal/gc"
+
+const (
+ NREGVAR = 32
+)
+
+var regname = []string{
+ ".AX",
+ ".CX",
+ ".DX",
+ ".BX",
+ ".SP",
+ ".BP",
+ ".SI",
+ ".DI",
+ ".R8",
+ ".R9",
+ ".R10",
+ ".R11",
+ ".R12",
+ ".R13",
+ ".R14",
+ ".R15",
+ ".X0",
+ ".X1",
+ ".X2",
+ ".X3",
+ ".X4",
+ ".X5",
+ ".X6",
+ ".X7",
+ ".X8",
+ ".X9",
+ ".X10",
+ ".X11",
+ ".X12",
+ ".X13",
+ ".X14",
+ ".X15",
+}
+
+func regnames(n *int) []string {
+ *n = NREGVAR
+ return regname
+}
+
+func excludedregs() uint64 {
+ return RtoB(x86.REG_SP)
+}
+
+func doregbits(r int) uint64 {
+ var b uint64
+
+ b = 0
+ if r >= x86.REG_AX && r <= x86.REG_R15 {
+ b |= RtoB(r)
+ } else if r >= x86.REG_AL && r <= x86.REG_R15B {
+ b |= RtoB(r - x86.REG_AL + x86.REG_AX)
+ } else if r >= x86.REG_AH && r <= x86.REG_BH {
+ b |= RtoB(r - x86.REG_AH + x86.REG_AX)
+ } else if r >= x86.REG_X0 && r <= x86.REG_X0+15 {
+ b |= FtoB(r)
+ }
+ return b
+}
+
+func RtoB(r int) uint64 {
+ if r < x86.REG_AX || r > x86.REG_R15 {
+ return 0
+ }
+ return 1 << uint(r-x86.REG_AX)
+}
+
+func BtoR(b uint64) int {
+ b &= 0xffff
+ if gc.Nacl {
+ b &^= (1<<(x86.REG_BP-x86.REG_AX) | 1<<(x86.REG_R15-x86.REG_AX))
+ } else if obj.Framepointer_enabled != 0 {
+ // BP is part of the calling convention if framepointer_enabled.
+ b &^= (1 << (x86.REG_BP - x86.REG_AX))
+ }
+
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + x86.REG_AX
+}
+
+/*
+ * bit reg
+ * 16 X0
+ * ...
+ * 31 X15
+ */
+func FtoB(f int) uint64 {
+ if f < x86.REG_X0 || f > x86.REG_X15 {
+ return 0
+ }
+ return 1 << uint(f-x86.REG_X0+16)
+}
+
+func BtoF(b uint64) int {
+ b &= 0xFFFF0000
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) - 16 + x86.REG_X0
+}
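+
+// bitRoundTrip is an illustrative check, not part of the original
+// conversion, of the register/bitmask encoding above: integer registers
+// occupy bits 0-15 and SSE registers bits 16-31, so the R and F mappings
+// invert cleanly for any single register.
+func bitRoundTrip() bool {
+	return BtoR(RtoB(x86.REG_CX)) == x86.REG_CX &&
+		BtoF(FtoB(x86.REG_X3)) == x86.REG_X3
+}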
diff --git a/src/cmd/new6g/util.go b/src/cmd/new6g/util.go
new file mode 100644
index 0000000000..bb5eedb15a
--- /dev/null
+++ b/src/cmd/new6g/util.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func bool2int(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
diff --git a/src/cmd/new8g/cgen.go b/src/cmd/new8g/cgen.go
new file mode 100644
index 0000000000..9d71aebd13
--- /dev/null
+++ b/src/cmd/new8g/cgen.go
@@ -0,0 +1,1731 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+/*
+ * peep.c
+ */
+func mgen(n *gc.Node, n1 *gc.Node, rg *gc.Node) {
+ var n2 gc.Node
+
+ n1.Op = gc.OEMPTY
+
+ if n.Addable != 0 {
+ *n1 = *n
+ if n1.Op == gc.OREGISTER || n1.Op == gc.OINDREG {
+ reg[n.Val.U.Reg]++
+ }
+ return
+ }
+
+ gc.Tempname(n1, n.Type)
+ cgen(n, n1)
+ if n.Type.Width <= int64(gc.Widthptr) || gc.Isfloat[n.Type.Etype] != 0 {
+ n2 = *n1
+ regalloc(n1, n.Type, rg)
+ gmove(&n2, n1)
+ }
+}
+
+func mfree(n *gc.Node) {
+ if n.Op == gc.OREGISTER {
+ regfree(n)
+ }
+}
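+
+// exampleMgen is an illustrative sketch, not part of the original
+// conversion, of the mgen/mfree pairing: mgen either aliases an addressable
+// node or materializes it (into a register when it fits), and mfree
+// releases the register if one was allocated.
+func exampleMgen(n *gc.Node) {
+	var n1 gc.Node
+	mgen(n, &n1, nil)
+	// ... emit instructions that read n1 ...
+	mfree(&n1)
+}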
+
+/*
+ * generate:
+ * res = n;
+ * simplifies and calls gmove.
+ *
+ * TODO:
+ * sudoaddable
+ */
+func cgen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var nt gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var a int
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\ncgen-n", n)
+ gc.Dump("cgen-res", res)
+ }
+
+ if n == nil || n.Type == nil {
+ gc.Fatal("cgen: n nil")
+ }
+ if res == nil || res.Type == nil {
+ gc.Fatal("cgen: res nil")
+ }
+
+ switch n.Op {
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ if res.Op != gc.ONAME || !(res.Addable != 0) {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_slice(n, res)
+ }
+ return
+
+ case gc.OEFACE:
+ if res.Op != gc.ONAME || !(res.Addable != 0) {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_eface(n, res)
+ }
+ return
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ // function calls on both sides? introduce temporary
+ if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
+ gc.Tempname(&n1, n.Type)
+ cgen(n, &n1)
+ cgen(&n1, res)
+ return
+ }
+
+ // structs etc get handled specially
+ if gc.Isfat(n.Type) != 0 {
+ if n.Type.Width < 0 {
+ gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
+ }
+ sgen(n, res, n.Type.Width)
+ return
+ }
+
+ // update addressability for string, slice
+ // can't do in walk because n->left->addable
+ // changes if n->left is an escaping local variable.
+ switch n.Op {
+ case gc.OSPTR,
+ gc.OLEN:
+ if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OCAP:
+ if gc.Isslice(n.Left.Type) != 0 {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OITAB:
+ n.Addable = n.Left.Addable
+ }
+
+ // if both are addressable, move
+ if n.Addable != 0 && res.Addable != 0 {
+ gmove(n, res)
+ return
+ }
+
+ // if both are not addressable, use a temporary.
+ if !(n.Addable != 0) && !(res.Addable != 0) {
+ // could use regalloc here sometimes,
+ // but have to check for ullman >= UINF.
+ gc.Tempname(&n1, n.Type)
+
+ cgen(n, &n1)
+ cgen(&n1, res)
+ return
+ }
+
+ // if result is not addressable directly but n is,
+ // compute its address and then store via the address.
+ if !(res.Addable != 0) {
+ igen(res, &n1, nil)
+ cgen(n, &n1)
+ regfree(&n1)
+ return
+ }
+
+ // complex types
+ if gc.Complexop(n, res) != 0 {
+ gc.Complexgen(n, res)
+ return
+ }
+
+ // otherwise, the result is addressable but n is not.
+ // let's do some computation.
+
+ // use ullman to pick operand to eval first.
+ nl = n.Left
+
+ nr = n.Right
+ if nl != nil && nl.Ullman >= gc.UINF {
+ if nr != nil && nr.Ullman >= gc.UINF {
+ // both are hard
+ gc.Tempname(&n1, nl.Type)
+
+ cgen(nl, &n1)
+ n2 = *n
+ n2.Left = &n1
+ cgen(&n2, res)
+ return
+ }
+ }
+
+ // 64-bit ops are hard on 32-bit machine.
+ if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Left != nil && gc.Is64(n.Left.Type) != 0 {
+ switch n.Op {
+ // math goes to cgen64.
+ case gc.OMINUS,
+ gc.OCOM,
+ gc.OADD,
+ gc.OSUB,
+ gc.OMUL,
+ gc.OLROT,
+ gc.OLSH,
+ gc.ORSH,
+ gc.OAND,
+ gc.OOR,
+ gc.OXOR:
+ cgen64(n, res)
+
+ return
+ }
+ }
+
+ if nl != nil && gc.Isfloat[n.Type.Etype] != 0 && gc.Isfloat[nl.Type.Etype] != 0 {
+ cgen_float(n, res)
+ return
+ }
+
+ switch n.Op {
+ default:
+ gc.Dump("cgen", n)
+ gc.Fatal("cgen %v", gc.Oconv(int(n.Op), 0))
+
+ case gc.OREAL,
+ gc.OIMAG,
+ gc.OCOMPLEX:
+ gc.Fatal("unexpected complex")
+ return
+
+ // these call bgen to get a bool value
+ case gc.OOROR,
+ gc.OANDAND,
+ gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OLE,
+ gc.OGE,
+ gc.OGT,
+ gc.ONOT:
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+
+ p2 = gc.Pc
+ gmove(gc.Nodbool(1), res)
+ p3 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n, true, 0, p2)
+ gmove(gc.Nodbool(0), res)
+ gc.Patch(p3, gc.Pc)
+ return
+
+ case gc.OPLUS:
+ cgen(nl, res)
+ return
+
+ case gc.OMINUS,
+ gc.OCOM:
+ a = optoas(int(n.Op), nl.Type)
+ goto uop
+
+ // symmetric binary
+ case gc.OAND,
+ gc.OOR,
+ gc.OXOR,
+ gc.OADD,
+ gc.OMUL:
+ a = optoas(int(n.Op), nl.Type)
+
+ if a == i386.AIMULB {
+ cgen_bmul(int(n.Op), nl, nr, res)
+ break
+ }
+
+ goto sbop
+
+ // asymmetric binary
+ case gc.OSUB:
+ a = optoas(int(n.Op), nl.Type)
+
+ goto abop
+
+ case gc.OHMUL:
+ cgen_hmul(nl, nr, res)
+
+ case gc.OCONV:
+ if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
+ cgen(nl, res)
+ break
+ }
+
+ gc.Tempname(&n2, n.Type)
+ mgen(nl, &n1, res)
+ gmove(&n1, &n2)
+ gmove(&n2, res)
+ mfree(&n1)
+
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OIND,
+ gc.ONAME: // PHEAP or PPARAMREF var
+ igen(n, &n1, res)
+
+ gmove(&n1, res)
+ regfree(&n1)
+
+ case gc.OITAB:
+ igen(nl, &n1, res)
+ n1.Type = gc.Ptrto(gc.Types[gc.TUINTPTR])
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // pointer is the first word of string or slice.
+ case gc.OSPTR:
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+ p1 = gins(i386.ALEAL, nil, &n1)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ igen(nl, &n1, res)
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ case gc.OLEN:
+ if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ // map has len in the first 32-bit word.
+ // a zero pointer means zero length
+ gc.Tempname(&n1, gc.Types[gc.Tptr])
+
+ cgen(nl, &n1)
+ regalloc(&n2, gc.Types[gc.Tptr], nil)
+ gmove(&n1, &n2)
+ n1 = n2
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.TINT32]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+ // both slice and string have len one pointer into the struct.
+ igen(nl, &n1, res)
+
+ n1.Type = gc.Types[gc.TUINT32]
+ n1.Xoffset += int64(gc.Array_nel)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OCAP:
+ if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ // chan has cap in the second 32-bit word.
+ // a zero pointer means zero length
+ gc.Tempname(&n1, gc.Types[gc.Tptr])
+
+ cgen(nl, &n1)
+ regalloc(&n2, gc.Types[gc.Tptr], nil)
+ gmove(&n1, &n2)
+ n1 = n2
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Xoffset = 4
+ n2.Type = gc.Types[gc.TINT32]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isslice(nl.Type) != 0 {
+ igen(nl, &n1, res)
+ n1.Type = gc.Types[gc.TUINT32]
+ n1.Xoffset += int64(gc.Array_cap)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OADDR:
+ agen(nl, res)
+
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+ cgen_callret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_callret(n, res)
+
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+ cgen_callret(n, res)
+
+ case gc.OMOD,
+ gc.ODIV:
+ cgen_div(int(n.Op), nl, nr, res)
+
+ case gc.OLSH,
+ gc.ORSH,
+ gc.OLROT:
+ cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+ }
+
+ return
+
+sbop: // symmetric binary
+ if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+abop: // asymmetric binary
+ if gc.Smallintconst(nr) != 0 {
+ mgen(nl, &n1, res)
+ regalloc(&n2, nl.Type, &n1)
+ gmove(&n1, &n2)
+ gins(a, nr, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ mfree(&n1)
+ } else if nl.Ullman >= nr.Ullman {
+ gc.Tempname(&nt, nl.Type)
+ cgen(nl, &nt)
+ mgen(nr, &n2, nil)
+ regalloc(&n1, nl.Type, res)
+ gmove(&nt, &n1)
+ gins(a, &n2, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ mfree(&n2)
+ } else {
+ regalloc(&n2, nr.Type, res)
+ cgen(nr, &n2)
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ gins(a, &n2, &n1)
+ regfree(&n2)
+ gmove(&n1, res)
+ regfree(&n1)
+ }
+
+ return
+
+uop: // unary
+ gc.Tempname(&n1, nl.Type)
+
+ cgen(nl, &n1)
+ gins(a, nil, &n1)
+ gmove(&n1, res)
+ return
+}
+
+/*
+ * generate an addressable node in res, containing the value of n.
+ * n is an array index, and might be any size; res width is <= 32-bit.
+ * returns Prog* to patch to panic call.
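+ * On 386 a 64-bit index is first split into 32-bit halves: the low
+ * half becomes the index, and the returned branch is taken when the
+ * high half is nonzero (the index cannot be in bounds).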
+ */
+func igenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
+ var tmp gc.Node
+ var lo gc.Node
+ var hi gc.Node
+ var zero gc.Node
+
+ if !(gc.Is64(n.Type) != 0) {
+ if n.Addable != 0 {
+ // nothing to do.
+ *res = *n
+ } else {
+ gc.Tempname(res, gc.Types[gc.TUINT32])
+ cgen(n, res)
+ }
+
+ return nil
+ }
+
+ gc.Tempname(&tmp, gc.Types[gc.TINT64])
+ cgen(n, &tmp)
+ split64(&tmp, &lo, &hi)
+ gc.Tempname(res, gc.Types[gc.TUINT32])
+ gmove(&lo, res)
+ if bounded != 0 {
+ splitclean()
+ return nil
+ }
+
+ gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
+ gins(i386.ACMPL, &hi, &zero)
+ splitclean()
+ return gc.Gbranch(i386.AJNE, nil, +1)
+}
+
+/*
+ * address gen
+ * res = &n;
+ * The generated code checks that the result is not nil.
+ */
+func agen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var tmp gc.Node
+ var nlen gc.Node
+ var t *gc.Type
+ var w uint32
+ var v uint64
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var bounded int
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nagen-res", res)
+ gc.Dump("agen-r", n)
+ }
+
+ if n == nil || n.Type == nil || res == nil || res.Type == nil {
+ gc.Fatal("agen")
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+ // Use of a nil interface or nil slice.
+ // Create a temporary we can take the address of and read.
+ // The generated code is just going to panic, so it need not
+ // be terribly efficient. See issue 3670.
+ gc.Tempname(&n1, n.Type)
+
+ gc.Gvardef(&n1)
+ clearfat(&n1)
+ regalloc(&n2, gc.Types[gc.Tptr], res)
+ gins(i386.ALEAL, &n1, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ return
+ }
+
+ // addressable var is easy
+ if n.Addable != 0 {
+ if n.Op == gc.OREGISTER {
+ gc.Fatal("agen OREGISTER")
+ }
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+ gins(i386.ALEAL, n, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ return
+ }
+
+ // let's compute
+ nl = n.Left
+
+ nr = n.Right
+
+ switch n.Op {
+ default:
+ gc.Fatal("agen %v", gc.Oconv(int(n.Op), 0))
+ fallthrough
+
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+ cgen_aret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_aret(n, res)
+
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+ cgen_aret(n, res)
+
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ agen(&n1, res)
+
+ case gc.OEFACE:
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ agen(&n1, res)
+
+ case gc.OINDEX:
+ p2 = nil // to be patched to panicindex.
+ w = uint32(n.Type.Width)
+ bounded = bool2int(gc.Debug['B'] != 0 || n.Bounded != 0)
+ if nr.Addable != 0 {
+ // Generate &nl first, and move nr into register.
+ if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ igen(nl, &n3, res)
+ }
+ if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ p2 = igenindex(nr, &tmp, bounded)
+ regalloc(&n1, tmp.Type, nil)
+ gmove(&tmp, &n1)
+ }
+ } else if nl.Addable != 0 {
+ // Generate nr first, and move &nl into register.
+ if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ p2 = igenindex(nr, &tmp, bounded)
+ regalloc(&n1, tmp.Type, nil)
+ gmove(&tmp, &n1)
+ }
+
+ if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ igen(nl, &n3, res)
+ }
+ } else {
+ p2 = igenindex(nr, &tmp, bounded)
+ nr = &tmp
+ if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ igen(nl, &n3, res)
+ }
+ regalloc(&n1, tmp.Type, nil)
+ gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
+ }
+
+ // For fixed array we really want the pointer in n3.
+ if gc.Isfixedarray(nl.Type) != 0 {
+ regalloc(&n2, gc.Types[gc.Tptr], &n3)
+ agen(&n3, &n2)
+ regfree(&n3)
+ n3 = n2
+ }
+
+ // &a[0] is in n3 (allocated in res)
+ // i is in n1 (if not constant)
+ // len(a) is in nlen (if needed)
+ // w is width
+
+ // constant index
+ if gc.Isconst(nr, gc.CTINT) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ gc.Fatal("constant string constant index") // front end should handle
+ }
+ v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ nlen = n3
+ nlen.Type = gc.Types[gc.TUINT32]
+ nlen.Xoffset += int64(gc.Array_nel)
+ gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
+ gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &nlen, &n2)
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
+ ginscall(gc.Panicindex, -1)
+ gc.Patch(p1, gc.Pc)
+ }
+ }
+
+ // Load base pointer in n2 = n3.
+ regalloc(&n2, gc.Types[gc.Tptr], &n3)
+
+ n3.Type = gc.Types[gc.Tptr]
+ n3.Xoffset += int64(gc.Array_array)
+ gmove(&n3, &n2)
+ regfree(&n3)
+ if v*uint64(w) != 0 {
+ gc.Nodconst(&n1, gc.Types[gc.Tptr], int64(v*uint64(w)))
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, &n2)
+ }
+
+ gmove(&n2, res)
+ regfree(&n2)
+ break
+ }
+
+ // i is in register n1, extend to 32 bits.
+ t = gc.Types[gc.TUINT32]
+
+ if gc.Issigned[n1.Type.Etype] != 0 {
+ t = gc.Types[gc.TINT32]
+ }
+
+ regalloc(&n2, t, &n1) // i
+ gmove(&n1, &n2)
+ regfree(&n1)
+
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ // check bounds
+ t = gc.Types[gc.TUINT32]
+
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
+ } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ nlen = n3
+ nlen.Type = t
+ nlen.Xoffset += int64(gc.Array_nel)
+ } else {
+ gc.Nodconst(&nlen, t, nl.Type.Bound)
+ }
+
+ gins(optoas(gc.OCMP, t), &n2, &nlen)
+ p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
+ if p2 != nil {
+ gc.Patch(p2, gc.Pc)
+ }
+ ginscall(gc.Panicindex, -1)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ regalloc(&n3, gc.Types[gc.Tptr], res)
+ p1 = gins(i386.ALEAL, nil, &n3)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ p1.From.Scale = 1
+ p1.From.Index = n2.Val.U.Reg
+ goto indexdone
+ }
+
+ // Load base pointer in n3.
+ regalloc(&tmp, gc.Types[gc.Tptr], &n3)
+
+ if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ n3.Type = gc.Types[gc.Tptr]
+ n3.Xoffset += int64(gc.Array_array)
+ gmove(&n3, &tmp)
+ }
+
+ regfree(&n3)
+ n3 = tmp
+
+ if w == 0 {
+ // nothing to do
+ } else if w == 1 || w == 2 || w == 4 || w == 8 {
+ // LEAL (n3)(n2*w), n3
+ p1 = gins(i386.ALEAL, &n2, &n3)
+
+ p1.From.Scale = int8(w)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Index = p1.From.Reg
+ p1.From.Reg = p1.To.Reg
+ } else {
+ gc.Nodconst(&tmp, gc.Types[gc.TUINT32], int64(w))
+ gins(optoas(gc.OMUL, gc.Types[gc.TUINT32]), &tmp, &n2)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ }
+
+ indexdone:
+ gmove(&n3, res)
+ regfree(&n2)
+ regfree(&n3)
+
+ // should only get here with names in this func.
+ case gc.ONAME:
+ if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
+ gc.Dump("bad agen", n)
+ gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
+ }
+
+ // should only get here for heap vars or paramref
+ if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+ gc.Dump("bad agen", n)
+ gc.Fatal("agen: bad ONAME class %#x", n.Class)
+ }
+
+ cgen(n.Heapaddr, res)
+ if n.Xoffset != 0 {
+ gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
+ }
+
+ case gc.OIND:
+ cgen(nl, res)
+ gc.Cgen_checknil(res)
+
+ case gc.ODOT:
+ agen(nl, res)
+ if n.Xoffset != 0 {
+ gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
+ }
+
+ case gc.ODOTPTR:
+ t = nl.Type
+ if !(gc.Isptr[t.Etype] != 0) {
+ gc.Fatal("agen: not ptr %v", gc.Nconv(n, 0))
+ }
+ cgen(nl, res)
+ gc.Cgen_checknil(res)
+ if n.Xoffset != 0 {
+ gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
+ }
+ }
+}
+
+/*
+ * generate:
+ * newreg = &n;
+ * res = newreg
+ *
+ * on exit, a has been changed to be *newreg.
+ * caller must regfree(a).
+ * The generated code checks that the result is not *nil.
+ */
+func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var fp *gc.Type
+ var flist gc.Iter
+ var n1 gc.Node
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nigen-n", n)
+ }
+
+ switch n.Op {
+ case gc.ONAME:
+ if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
+ break
+ }
+ *a = *n
+ return
+
+ // Increase the refcount of the register so that igen's caller
+ // has to call regfree.
+ case gc.OINDREG:
+ if n.Val.U.Reg != i386.REG_SP {
+ reg[n.Val.U.Reg]++
+ }
+ *a = *n
+ return
+
+ case gc.ODOT:
+ igen(n.Left, a, res)
+ a.Xoffset += n.Xoffset
+ a.Type = n.Type
+ return
+
+ case gc.ODOTPTR:
+ switch n.Left.Op {
+ // igen-able nodes.
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ igen(n.Left, &n1, res)
+
+ regalloc(a, gc.Types[gc.Tptr], &n1)
+ gmove(&n1, a)
+ regfree(&n1)
+
+ default:
+ regalloc(a, gc.Types[gc.Tptr], res)
+ cgen(n.Left, a)
+ }
+
+ gc.Cgen_checknil(a)
+ a.Op = gc.OINDREG
+ a.Xoffset += n.Xoffset
+ a.Type = n.Type
+ return
+
+ case gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ switch n.Op {
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, nil, 0)
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+ *a = gc.Node{}
+ a.Op = gc.OINDREG
+ a.Val.U.Reg = i386.REG_SP
+ a.Addable = 1
+ a.Xoffset = fp.Width
+ a.Type = n.Type
+ return
+
+ // Index of fixed-size array by constant can
+ // put the offset in the addressing.
+ // Could do the same for slice except that we need
+ // to use the real index for the bounds checking.
+ case gc.OINDEX:
+ if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
+ if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ // Compute &a.
+ if !(gc.Isptr[n.Left.Type.Etype] != 0) {
+ igen(n.Left, a, res)
+ } else {
+ igen(n.Left, &n1, res)
+ gc.Cgen_checknil(&n1)
+ regalloc(a, gc.Types[gc.Tptr], res)
+ gmove(&n1, a)
+ regfree(&n1)
+ a.Op = gc.OINDREG
+ }
+
+ // Compute &a[i] as &a + i*width.
+ a.Type = n.Type
+
+ a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
+ return
+ }
+ }
+ }
+
+ // release register for now, to avoid
+ // confusing tempname.
+ if res != nil && res.Op == gc.OREGISTER {
+ reg[res.Val.U.Reg]--
+ }
+ gc.Tempname(&n1, gc.Types[gc.Tptr])
+ agen(n, &n1)
+ if res != nil && res.Op == gc.OREGISTER {
+ reg[res.Val.U.Reg]++
+ }
+ regalloc(a, gc.Types[gc.Tptr], res)
+ gmove(&n1, a)
+ a.Op = gc.OINDREG
+ a.Type = n.Type
+}
+
+/*
+ * branch gen
+ * if(n == true) goto to;
+ */
+func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
+ var et int
+ var a int
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var tmp gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nbgen", n)
+ }
+
+ if n == nil {
+ n = gc.Nodbool(1)
+ }
+
+ if n.Ninit != nil {
+ gc.Genlist(n.Ninit)
+ }
+
+ if n.Type == nil {
+ gc.Convlit(&n, gc.Types[gc.TBOOL])
+ if n.Type == nil {
+ return
+ }
+ }
+
+ et = int(n.Type.Etype)
+ if et != gc.TBOOL {
+ gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
+ gc.Patch(gins(obj.AEND, nil, nil), to)
+ return
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ if n.Ninit != nil {
+ gc.Genlist(n.Ninit)
+ }
+ }
+
+ nl = n.Left
+ nr = nil
+
+ if nl != nil && gc.Isfloat[nl.Type.Etype] != 0 {
+ bgen_float(n, bool2int(true_), likely, to)
+ return
+ }
+
+ switch n.Op {
+ default:
+ goto def
+
+ // need to ask if it is bool?
+ case gc.OLITERAL:
+ if !true_ == !(n.Val.U.Bval != 0) {
+ gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+ }
+ return
+
+ case gc.ONAME:
+ if !(n.Addable != 0) {
+ goto def
+ }
+ gc.Nodconst(&n1, n.Type, 0)
+ gins(optoas(gc.OCMP, n.Type), n, &n1)
+ a = i386.AJNE
+ if !true_ {
+ a = i386.AJEQ
+ }
+ gc.Patch(gc.Gbranch(a, n.Type, likely), to)
+ return
+
+ case gc.OANDAND,
+ gc.OOROR:
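+ // When the branch polarity requires both operands to hold
+ // (a&&b taken when true, a||b taken when false), branch each
+ // negated test to a local fail label and then jump to `to`;
+ // otherwise short-circuit: either operand may branch to `to`.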
+ if (n.Op == gc.OANDAND) == true_ {
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n.Left, !true_, -likely, p2)
+ bgen(n.Right, !true_, -likely, p2)
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, to)
+ gc.Patch(p2, gc.Pc)
+ } else {
+ bgen(n.Left, true_, likely, to)
+ bgen(n.Right, true_, likely, to)
+ }
+
+ return
+
+ case gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OGT,
+ gc.OLE,
+ gc.OGE:
+ nr = n.Right
+ if nr == nil || nr.Type == nil {
+ return
+ }
+ fallthrough
+
+ case gc.ONOT: // unary
+ nl = n.Left
+
+ if nl == nil || nl.Type == nil {
+ return
+ }
+ }
+
+ switch n.Op {
+ case gc.ONOT:
+ bgen(nl, !true_, likely, to)
+
+ case gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OGT,
+ gc.OLE,
+ gc.OGE:
+ a = int(n.Op)
+ if !true_ {
+ a = gc.Brcom(a)
+ true_ = !true_
+ }
+
+ // make simplest on right
+ if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
+ a = gc.Brrev(a)
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+ if gc.Isslice(nl.Type) != 0 {
+ // front end should only leave cmp to literal nil
+ if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+ gc.Yyerror("illegal slice comparison")
+ break
+ }
+
+ a = optoas(a, gc.Types[gc.Tptr])
+ igen(nl, &n1, nil)
+ n1.Xoffset += int64(gc.Array_array)
+ n1.Type = gc.Types[gc.Tptr]
+ gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
+ gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isinter(nl.Type) != 0 {
+ // front end should only leave cmp to literal nil
+ if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+ gc.Yyerror("illegal interface comparison")
+ break
+ }
+
+ a = optoas(a, gc.Types[gc.Tptr])
+ igen(nl, &n1, nil)
+ n1.Type = gc.Types[gc.Tptr]
+ gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
+ gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Iscomplex[nl.Type.Etype] != 0 {
+ gc.Complexbool(a, nl, nr, true_, likely, to)
+ break
+ }
+
+ if gc.Is64(nr.Type) != 0 {
+ if !(nl.Addable != 0) || gc.Isconst(nl, gc.CTINT) != 0 {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ nl = &n1
+ }
+
+ if !(nr.Addable != 0) {
+ gc.Tempname(&n2, nr.Type)
+ cgen(nr, &n2)
+ nr = &n2
+ }
+
+ cmp64(nl, nr, a, likely, to)
+ break
+ }
+
+ if nr.Ullman >= gc.UINF {
+ if !(nl.Addable != 0) {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ nl = &n1
+ }
+
+ if !(nr.Addable != 0) {
+ gc.Tempname(&tmp, nr.Type)
+ cgen(nr, &tmp)
+ nr = &tmp
+ }
+
+ regalloc(&n2, nr.Type, nil)
+ cgen(nr, &n2)
+ nr = &n2
+ goto cmp
+ }
+
+ if !(nl.Addable != 0) {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ nl = &n1
+ }
+
+ if gc.Smallintconst(nr) != 0 {
+ gins(optoas(gc.OCMP, nr.Type), nl, nr)
+ gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+ break
+ }
+
+ if !(nr.Addable != 0) {
+ gc.Tempname(&tmp, nr.Type)
+ cgen(nr, &tmp)
+ nr = &tmp
+ }
+
+ regalloc(&n2, nr.Type, nil)
+ gmove(nr, &n2)
+ nr = &n2
+
+ cmp:
+ gins(optoas(gc.OCMP, nr.Type), nl, nr)
+ gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+
+ if nl.Op == gc.OREGISTER {
+ regfree(nl)
+ }
+ regfree(nr)
+ }
+
+ return
+
+def:
+ regalloc(&n1, n.Type, nil)
+ cgen(n, &n1)
+ gc.Nodconst(&n2, n.Type, 0)
+ gins(optoas(gc.OCMP, n.Type), &n1, &n2)
+ a = i386.AJNE
+ if !true_ {
+ a = i386.AJEQ
+ }
+ gc.Patch(gc.Gbranch(a, n.Type, likely), to)
+ regfree(&n1)
+ return
+}
+
+/*
+ * n is on stack, either local variable
+ * or return value from function call.
+ * return n's offset from SP.
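+ * Returns -1000 if n is not on the stack at all, and 1000 if it is
+ * on the stack but at an offset that is not known statically.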
+ */
+func stkof(n *gc.Node) int32 {
+ var t *gc.Type
+ var flist gc.Iter
+ var off int32
+
+ switch n.Op {
+ case gc.OINDREG:
+ return int32(n.Xoffset)
+
+ case gc.ODOT:
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ break
+ }
+ off = stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ return int32(int64(off) + n.Xoffset)
+
+ case gc.OINDEX:
+ t = n.Left.Type
+ if !(gc.Isfixedarray(t) != 0) {
+ break
+ }
+ off = stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
+ }
+ return 1000
+
+ case gc.OCALLMETH,
+ gc.OCALLINTER,
+ gc.OCALLFUNC:
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+
+ t = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if t != nil {
+ return int32(t.Width)
+ }
+ }
+
+ // botch - probably failing to recognize address
+ // arithmetic on the above. eg INDEX and DOT
+ return -1000
+}
+
+/*
+ * struct gen
+ * memmove(&res, &n, w);
+ */
+func sgen(n *gc.Node, res *gc.Node, w int64) {
+ var dst gc.Node
+ var src gc.Node
+ var tdst gc.Node
+ var tsrc gc.Node
+ var cx gc.Node
+ var c int32
+ var q int32
+ var odst int32
+ var osrc int32
+ var l *gc.NodeList
+ var p *obj.Prog
+
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("\nsgen w=%d\n", w)
+ gc.Dump("r", n)
+ gc.Dump("res", res)
+ }
+
+ if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
+ gc.Fatal("sgen UINF")
+ }
+
+ if w < 0 || int64(int32(w)) != w {
+ gc.Fatal("sgen copy %d", w)
+ }
+
+ if w == 0 {
+ // evaluate side effects only.
+ gc.Tempname(&tdst, gc.Types[gc.Tptr])
+
+ agen(res, &tdst)
+ agen(n, &tdst)
+ return
+ }
+
+ // If copying .args, that's all the results, so record definition sites
+ // for them for the liveness analysis.
+ if res.Op == gc.ONAME && res.Sym.Name == ".args" {
+ for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ if l.N.Class == gc.PPARAMOUT {
+ gc.Gvardef(l.N)
+ }
+ }
+ }
+
+ // Avoid taking the address for simple enough types.
+ if componentgen(n, res) != 0 {
+ return
+ }
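+ // (A string assignment, for instance, becomes two word moves,
+ // pointer then length, with no REP MOVSL setup.)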
+
+ // offset on the stack
+ osrc = stkof(n)
+
+ odst = stkof(res)
+
+ if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
+ // osrc and odst both on stack, and at least one is in
+ // an unknown position. Could generate code to test
+ // for forward/backward copy, but instead just copy
+ // to a temporary location first.
+ gc.Tempname(&tsrc, n.Type)
+
+ sgen(n, &tsrc, w)
+ sgen(&tsrc, res, w)
+ return
+ }
+
+ gc.Nodreg(&dst, gc.Types[gc.Tptr], i386.REG_DI)
+ gc.Nodreg(&src, gc.Types[gc.Tptr], i386.REG_SI)
+
+ gc.Tempname(&tsrc, gc.Types[gc.Tptr])
+ gc.Tempname(&tdst, gc.Types[gc.Tptr])
+ if !(n.Addable != 0) {
+ agen(n, &tsrc)
+ }
+ if !(res.Addable != 0) {
+ agen(res, &tdst)
+ }
+ if n.Addable != 0 {
+ agen(n, &src)
+ } else {
+ gmove(&tsrc, &src)
+ }
+
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+
+ if res.Addable != 0 {
+ agen(res, &dst)
+ } else {
+ gmove(&tdst, &dst)
+ }
+
+ c = int32(w % 4) // bytes
+ q = int32(w / 4) // doublewords
+
+ // if we are copying forward on the stack and
+ // the src and dst overlap, then reverse direction
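+ // (e.g. an 8-byte copy from SP+0 to SP+4: copying forward would
+ // clobber bytes 4..7 before they are read, so copy from the top down.)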
+ if osrc < odst && int64(odst) < int64(osrc)+w {
+ // reverse direction
+ gins(i386.ASTD, nil, nil) // set direction flag
+ if c > 0 {
+ gconreg(i386.AADDL, w-1, i386.REG_SI)
+ gconreg(i386.AADDL, w-1, i386.REG_DI)
+
+ gconreg(i386.AMOVL, int64(c), i386.REG_CX)
+ gins(i386.AREP, nil, nil) // repeat
+ gins(i386.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
+ }
+
+ if q > 0 {
+ if c > 0 {
+ gconreg(i386.AADDL, -3, i386.REG_SI)
+ gconreg(i386.AADDL, -3, i386.REG_DI)
+ } else {
+ gconreg(i386.AADDL, w-4, i386.REG_SI)
+ gconreg(i386.AADDL, w-4, i386.REG_DI)
+ }
+
+ gconreg(i386.AMOVL, int64(q), i386.REG_CX)
+ gins(i386.AREP, nil, nil) // repeat
+ gins(i386.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
+ }
+
+ // we leave with the flag clear
+ gins(i386.ACLD, nil, nil)
+ } else {
+ gins(i386.ACLD, nil, nil) // paranoia. TODO(rsc): remove?
+
+ // normal direction
+ if q > 128 || (q >= 4 && gc.Nacl) {
+ gconreg(i386.AMOVL, int64(q), i386.REG_CX)
+ gins(i386.AREP, nil, nil) // repeat
+ gins(i386.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
+ } else if q >= 4 {
+ p = gins(obj.ADUFFCOPY, nil, nil)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
+
+ // 10 and 128 = magic constants: see ../../runtime/asm_386.s
+ p.To.Offset = 10 * (128 - int64(q))
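+ // (The entry offset skips 128-q of duffcopy's 128 ten-byte copy
+ // blocks, so exactly q 4-byte words remain to be copied.)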
+ } else if !gc.Nacl && c == 0 {
+ gc.Nodreg(&cx, gc.Types[gc.TINT32], i386.REG_CX)
+
+ // We don't need the MOVSL side-effect of updating SI and DI,
+ // and issuing a sequence of MOVLs directly is faster.
+ src.Op = gc.OINDREG
+
+ dst.Op = gc.OINDREG
+ for q > 0 {
+ gmove(&src, &cx) // MOVL x+(SI),CX
+ gmove(&cx, &dst) // MOVL CX,x+(DI)
+ src.Xoffset += 4
+ dst.Xoffset += 4
+ q--
+ }
+ } else {
+ for q > 0 {
+ gins(i386.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
+ q--
+ }
+ }
+
+ for c > 0 {
+ gins(i386.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
+ c--
+ }
+ }
+}
+
+func cadable(n *gc.Node) int {
+ if !(n.Addable != 0) {
+ // don't know how it happens,
+ // but it does
+ return 0
+ }
+
+ switch n.Op {
+ case gc.ONAME:
+ return 1
+ }
+
+ return 0
+}
+
+/*
+ * copy a composite value by moving its individual components.
+ * Slices, strings and interfaces are supported.
+ * Small structs or arrays with elements of basic type are
+ * also supported.
+ * nr is nil when assigning a zero value.
+ * return 1 if can do, 0 if can't.
+ */
+func componentgen(nr *gc.Node, nl *gc.Node) int {
+ var nodl gc.Node
+ var nodr gc.Node
+ var tmp gc.Node
+ var t *gc.Type
+ var freel int
+ var freer int
+ var fldcount int64
+ var loffset int64
+ var roffset int64
+
+ freel = 0
+ freer = 0
+
+ switch nl.Type.Etype {
+ default:
+ goto no
+
+ case gc.TARRAY:
+ t = nl.Type
+
+ // Slices are ok.
+ if gc.Isslice(t) != 0 {
+ break
+ }
+
+ // Small arrays are ok.
+ if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+ break
+ }
+
+ goto no
+
+ // Small structs with non-fat types are ok.
+ // Zero-sized structs are treated separately elsewhere.
+ case gc.TSTRUCT:
+ fldcount = 0
+
+ for t = nl.Type.Type; t != nil; t = t.Down {
+ if gc.Isfat(t.Type) != 0 {
+ goto no
+ }
+ if t.Etype != gc.TFIELD {
+ gc.Fatal("componentgen: not a TFIELD: %v", gc.Tconv(t, obj.FmtLong))
+ }
+ fldcount++
+ }
+
+ if fldcount == 0 || fldcount > 4 {
+ goto no
+ }
+
+ case gc.TSTRING,
+ gc.TINTER:
+ break
+ }
+
+ nodl = *nl
+ if !(cadable(nl) != 0) {
+ if nr != nil && !(cadable(nr) != 0) {
+ goto no
+ }
+ igen(nl, &nodl, nil)
+ freel = 1
+ }
+
+ if nr != nil {
+ nodr = *nr
+ if !(cadable(nr) != 0) {
+ igen(nr, &nodr, nil)
+ freer = 1
+ }
+ } else {
+ // When zeroing, prepare a register containing zero.
+ gc.Nodconst(&tmp, nl.Type, 0)
+
+ regalloc(&nodr, gc.Types[gc.TUINT], nil)
+ gmove(&tmp, &nodr)
+ freer = 1
+ }
+
+ // nl and nr are 'cadable' which basically means they are names (variables) now.
+ // If they are the same variable, don't generate any code, because the
+ // VARDEF we generate will mark the old value as dead incorrectly.
+ // (And also the assignments are useless.)
+ if nr != nil && nl.Op == gc.ONAME && nr.Op == gc.ONAME && nl == nr {
+ goto yes
+ }
+
+ switch nl.Type.Etype {
+ // componentgen for arrays.
+ case gc.TARRAY:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ t = nl.Type
+ if !(gc.Isslice(t) != 0) {
+ nodl.Type = t.Type
+ nodr.Type = nodl.Type
+ for fldcount = 0; fldcount < t.Bound; fldcount++ {
+ if nr == nil {
+ gc.Clearslim(&nodl)
+ } else {
+ gmove(&nodr, &nodl)
+ }
+ nodl.Xoffset += t.Type.Width
+ nodr.Xoffset += t.Type.Width
+ }
+
+ goto yes
+ }
+
+ // componentgen for slices.
+ nodl.Xoffset += int64(gc.Array_array)
+
+ nodl.Type = gc.Ptrto(nl.Type.Type)
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TSTRING:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ nodl.Xoffset += int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TINTER:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ nodl.Xoffset += int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TSTRUCT:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ loffset = nodl.Xoffset
+ roffset = nodr.Xoffset
+
+ // funarg structs may not begin at offset zero.
+ if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
+ loffset -= nl.Type.Type.Width
+ }
+ if nr != nil && nr.Type.Etype == gc.TSTRUCT && nr.Type.Funarg != 0 && nr.Type.Type != nil {
+ roffset -= nr.Type.Type.Width
+ }
+
+ for t = nl.Type.Type; t != nil; t = t.Down {
+ nodl.Xoffset = loffset + t.Width
+ nodl.Type = t.Type
+
+ if nr == nil {
+ gc.Clearslim(&nodl)
+ } else {
+ nodr.Xoffset = roffset + t.Width
+ nodr.Type = nodl.Type
+ gmove(&nodr, &nodl)
+ }
+ }
+
+ goto yes
+ }
+
+no:
+ if freer != 0 {
+ regfree(&nodr)
+ }
+ if freel != 0 {
+ regfree(&nodl)
+ }
+ return 0
+
+yes:
+ if freer != 0 {
+ regfree(&nodr)
+ }
+ if freel != 0 {
+ regfree(&nodl)
+ }
+ return 1
+}
diff --git a/src/cmd/new8g/cgen64.go b/src/cmd/new8g/cgen64.go
new file mode 100644
index 0000000000..108fc6aba5
--- /dev/null
+++ b/src/cmd/new8g/cgen64.go
@@ -0,0 +1,609 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+)
+
+/*
+ * attempt to generate 64-bit
+ * res = n
+ * unhandled ops cause a fatal error.
+ */
+func cgen64(n *gc.Node, res *gc.Node) {
+ var t1 gc.Node
+ var t2 gc.Node
+ var ax gc.Node
+ var dx gc.Node
+ var cx gc.Node
+ var ex gc.Node
+ var fx gc.Node
+ var l *gc.Node
+ var r *gc.Node
+ var lo1 gc.Node
+ var lo2 gc.Node
+ var hi1 gc.Node
+ var hi2 gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var v uint64
+ var lv uint32
+ var hv uint32
+
+ if res.Op != gc.OINDREG && res.Op != gc.ONAME {
+ gc.Dump("n", n)
+ gc.Dump("res", res)
+ gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
+ }
+
+ switch n.Op {
+ default:
+ gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
+ fallthrough
+
+ case gc.OMINUS:
+ cgen(n.Left, res)
+ split64(res, &lo1, &hi1)
+ gins(i386.ANEGL, nil, &lo1)
+ gins(i386.AADCL, ncon(0), &hi1)
+ gins(i386.ANEGL, nil, &hi1)
+ splitclean()
+ return
+
+ case gc.OCOM:
+ cgen(n.Left, res)
+ split64(res, &lo1, &hi1)
+ gins(i386.ANOTL, nil, &lo1)
+ gins(i386.ANOTL, nil, &hi1)
+ splitclean()
+ return
+
+ // binary operators.
+ // common setup below.
+ case gc.OADD,
+ gc.OSUB,
+ gc.OMUL,
+ gc.OLROT,
+ gc.OLSH,
+ gc.ORSH,
+ gc.OAND,
+ gc.OOR,
+ gc.OXOR:
+ break
+ }
+
+ l = n.Left
+ r = n.Right
+ if !(l.Addable != 0) {
+ gc.Tempname(&t1, l.Type)
+ cgen(l, &t1)
+ l = &t1
+ }
+
+ if r != nil && !(r.Addable != 0) {
+ gc.Tempname(&t2, r.Type)
+ cgen(r, &t2)
+ r = &t2
+ }
+
+ gc.Nodreg(&ax, gc.Types[gc.TINT32], i386.REG_AX)
+ gc.Nodreg(&cx, gc.Types[gc.TINT32], i386.REG_CX)
+ gc.Nodreg(&dx, gc.Types[gc.TINT32], i386.REG_DX)
+
+ // Setup for binary operation.
+ split64(l, &lo1, &hi1)
+
+ if gc.Is64(r.Type) != 0 {
+ split64(r, &lo2, &hi2)
+ }
+
+ // Do op. Leave result in DX:AX.
+ switch n.Op {
+ // TODO: Constants
+ case gc.OADD:
+ gins(i386.AMOVL, &lo1, &ax)
+
+ gins(i386.AMOVL, &hi1, &dx)
+ gins(i386.AADDL, &lo2, &ax)
+ gins(i386.AADCL, &hi2, &dx)
+
+ // TODO: Constants.
+ case gc.OSUB:
+ gins(i386.AMOVL, &lo1, &ax)
+
+ gins(i386.AMOVL, &hi1, &dx)
+ gins(i386.ASUBL, &lo2, &ax)
+ gins(i386.ASBBL, &hi2, &dx)
+
+ // let's call the next two EX and FX.
+ case gc.OMUL:
+ regalloc(&ex, gc.Types[gc.TPTR32], nil)
+
+ regalloc(&fx, gc.Types[gc.TPTR32], nil)
+
+ // load args into DX:AX and EX:CX.
+ gins(i386.AMOVL, &lo1, &ax)
+
+ gins(i386.AMOVL, &hi1, &dx)
+ gins(i386.AMOVL, &lo2, &cx)
+ gins(i386.AMOVL, &hi2, &ex)
+
+ // if DX and EX are zero, use 32 x 32 -> 64 unsigned multiply.
+ gins(i386.AMOVL, &dx, &fx)
+
+ gins(i386.AORL, &ex, &fx)
+ p1 = gc.Gbranch(i386.AJNE, nil, 0)
+ gins(i386.AMULL, &cx, nil) // implicit &ax
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+
+ // full 64x64 -> 64, from 32x32 -> 64.
+ gins(i386.AIMULL, &cx, &dx)
+
+ gins(i386.AMOVL, &ax, &fx)
+ gins(i386.AIMULL, &ex, &fx)
+ gins(i386.AADDL, &dx, &fx)
+ gins(i386.AMOVL, &cx, &dx)
+ gins(i386.AMULL, &dx, nil) // implicit &ax
+ gins(i386.AADDL, &fx, &dx)
+ gc.Patch(p2, gc.Pc)
+
+ regfree(&ex)
+ regfree(&fx)
+
+ // We only rotate by a constant c in [0,64).
+ // if c >= 32:
+ // lo, hi = hi, lo
+ // c -= 32
+ // if c == 0:
+ // no-op
+ // else:
+ // t = hi
+ // shld hi:lo, c
+ // shld lo:t, c
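+ // e.g. c = 40: swap lo and hi (the rotate by 32), then a pair of
+ // 8-bit double-width shifts rotates the remaining 8 bits.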
+ case gc.OLROT:
+ v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+
+ if v >= 32 {
+ // reverse during load to do the first 32 bits of rotate
+ v -= 32
+
+ gins(i386.AMOVL, &lo1, &dx)
+ gins(i386.AMOVL, &hi1, &ax)
+ } else {
+ gins(i386.AMOVL, &lo1, &ax)
+ gins(i386.AMOVL, &hi1, &dx)
+ }
+
+ if v == 0 {
+ // done
+ } else {
+ gins(i386.AMOVL, &dx, &cx)
+ p1 = gins(i386.ASHLL, ncon(uint32(v)), &dx)
+ p1.From.Index = i386.REG_AX // double-width shift
+ p1.From.Scale = 0
+ p1 = gins(i386.ASHLL, ncon(uint32(v)), &ax)
+ p1.From.Index = i386.REG_CX // double-width shift
+ p1.From.Scale = 0
+ }
+
+ case gc.OLSH:
+ if r.Op == gc.OLITERAL {
+ v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ if v >= 64 {
+ if gc.Is64(r.Type) != 0 {
+ splitclean()
+ }
+ splitclean()
+ split64(res, &lo2, &hi2)
+ gins(i386.AMOVL, ncon(0), &lo2)
+ gins(i386.AMOVL, ncon(0), &hi2)
+ splitclean()
+ goto out
+ }
+
+ if v >= 32 {
+ if gc.Is64(r.Type) != 0 {
+ splitclean()
+ }
+ split64(res, &lo2, &hi2)
+ gmove(&lo1, &hi2)
+ if v > 32 {
+ gins(i386.ASHLL, ncon(uint32(v-32)), &hi2)
+ }
+
+ gins(i386.AMOVL, ncon(0), &lo2)
+ splitclean()
+ splitclean()
+ goto out
+ }
+
+ // general shift
+ gins(i386.AMOVL, &lo1, &ax)
+
+ gins(i386.AMOVL, &hi1, &dx)
+ p1 = gins(i386.ASHLL, ncon(uint32(v)), &dx)
+ p1.From.Index = i386.REG_AX // double-width shift
+ p1.From.Scale = 0
+ gins(i386.ASHLL, ncon(uint32(v)), &ax)
+ break
+ }
+
+ // load value into DX:AX.
+ gins(i386.AMOVL, &lo1, &ax)
+
+ gins(i386.AMOVL, &hi1, &dx)
+
+ // load shift value into register.
+ // if high bits are set, zero value.
+ p1 = nil
+
+ if gc.Is64(r.Type) != 0 {
+ gins(i386.ACMPL, &hi2, ncon(0))
+ p1 = gc.Gbranch(i386.AJNE, nil, +1)
+ gins(i386.AMOVL, &lo2, &cx)
+ } else {
+ cx.Type = gc.Types[gc.TUINT32]
+ gmove(r, &cx)
+ }
+
+ // if shift count is >=64, zero value
+ gins(i386.ACMPL, &cx, ncon(64))
+
+ p2 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ if p1 != nil {
+ gc.Patch(p1, gc.Pc)
+ }
+ gins(i386.AXORL, &dx, &dx)
+ gins(i386.AXORL, &ax, &ax)
+ gc.Patch(p2, gc.Pc)
+
+ // if shift count is >= 32, zero low.
+ gins(i386.ACMPL, &cx, ncon(32))
+
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ gins(i386.AMOVL, &ax, &dx)
+ gins(i386.ASHLL, &cx, &dx) // SHLL only uses bottom 5 bits of count
+ gins(i386.AXORL, &ax, &ax)
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+
+ // general shift
+ p1 = gins(i386.ASHLL, &cx, &dx)
+
+ p1.From.Index = i386.REG_AX // double-width shift
+ p1.From.Scale = 0
+ gins(i386.ASHLL, &cx, &ax)
+ gc.Patch(p2, gc.Pc)
+
+ case gc.ORSH:
+ if r.Op == gc.OLITERAL {
+ v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ if v >= 64 {
+ if gc.Is64(r.Type) != 0 {
+ splitclean()
+ }
+ splitclean()
+ split64(res, &lo2, &hi2)
+ if hi1.Type.Etype == gc.TINT32 {
+ gmove(&hi1, &lo2)
+ gins(i386.ASARL, ncon(31), &lo2)
+ gmove(&hi1, &hi2)
+ gins(i386.ASARL, ncon(31), &hi2)
+ } else {
+ gins(i386.AMOVL, ncon(0), &lo2)
+ gins(i386.AMOVL, ncon(0), &hi2)
+ }
+
+ splitclean()
+ goto out
+ }
+
+ if v >= 32 {
+ if gc.Is64(r.Type) != 0 {
+ splitclean()
+ }
+ split64(res, &lo2, &hi2)
+ gmove(&hi1, &lo2)
+ if v > 32 {
+ gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v-32)), &lo2)
+ }
+ if hi1.Type.Etype == gc.TINT32 {
+ gmove(&hi1, &hi2)
+ gins(i386.ASARL, ncon(31), &hi2)
+ } else {
+ gins(i386.AMOVL, ncon(0), &hi2)
+ }
+ splitclean()
+ splitclean()
+ goto out
+ }
+
+ // general shift
+ gins(i386.AMOVL, &lo1, &ax)
+
+ gins(i386.AMOVL, &hi1, &dx)
+ p1 = gins(i386.ASHRL, ncon(uint32(v)), &ax)
+ p1.From.Index = i386.REG_DX // double-width shift
+ p1.From.Scale = 0
+ gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v)), &dx)
+ break
+ }
+
+ // load value into DX:AX.
+ gins(i386.AMOVL, &lo1, &ax)
+
+ gins(i386.AMOVL, &hi1, &dx)
+
+ // load shift value into register.
+ // if high bits are set, zero value.
+ p1 = nil
+
+ if gc.Is64(r.Type) != 0 {
+ gins(i386.ACMPL, &hi2, ncon(0))
+ p1 = gc.Gbranch(i386.AJNE, nil, +1)
+ gins(i386.AMOVL, &lo2, &cx)
+ } else {
+ cx.Type = gc.Types[gc.TUINT32]
+ gmove(r, &cx)
+ }
+
+ // if shift count is >=64, zero or sign-extend value
+ gins(i386.ACMPL, &cx, ncon(64))
+
+ p2 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ if p1 != nil {
+ gc.Patch(p1, gc.Pc)
+ }
+ if hi1.Type.Etype == gc.TINT32 {
+ gins(i386.ASARL, ncon(31), &dx)
+ gins(i386.AMOVL, &dx, &ax)
+ } else {
+ gins(i386.AXORL, &dx, &dx)
+ gins(i386.AXORL, &ax, &ax)
+ }
+
+ gc.Patch(p2, gc.Pc)
+
+ // if shift count is >= 32, sign-extend hi.
+ gins(i386.ACMPL, &cx, ncon(32))
+
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ gins(i386.AMOVL, &dx, &ax)
+ if hi1.Type.Etype == gc.TINT32 {
+ gins(i386.ASARL, &cx, &ax) // SARL only uses bottom 5 bits of count
+ gins(i386.ASARL, ncon(31), &dx)
+ } else {
+ gins(i386.ASHRL, &cx, &ax)
+ gins(i386.AXORL, &dx, &dx)
+ }
+
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+
+ // general shift
+ p1 = gins(i386.ASHRL, &cx, &ax)
+
+ p1.From.Index = i386.REG_DX // double-width shift
+ p1.From.Scale = 0
+ gins(optoas(gc.ORSH, hi1.Type), &cx, &dx)
+ gc.Patch(p2, gc.Pc)
+
+ // make constant the right side (it usually is anyway).
+ case gc.OXOR,
+ gc.OAND,
+ gc.OOR:
+ if lo1.Op == gc.OLITERAL {
+ nswap(&lo1, &lo2)
+ nswap(&hi1, &hi2)
+ }
+
+ if lo2.Op == gc.OLITERAL {
+ // special cases for constants.
+ lv = uint32(gc.Mpgetfix(lo2.Val.U.Xval))
+
+ hv = uint32(gc.Mpgetfix(hi2.Val.U.Xval))
+ splitclean() // right side
+ split64(res, &lo2, &hi2)
+ switch n.Op {
+ case gc.OXOR:
+ gmove(&lo1, &lo2)
+ gmove(&hi1, &hi2)
+ switch lv {
+ case 0:
+ break
+
+ case 0xffffffff:
+ gins(i386.ANOTL, nil, &lo2)
+
+ default:
+ gins(i386.AXORL, ncon(lv), &lo2)
+ }
+
+ switch hv {
+ case 0:
+ break
+
+ case 0xffffffff:
+ gins(i386.ANOTL, nil, &hi2)
+
+ default:
+ gins(i386.AXORL, ncon(hv), &hi2)
+ }
+
+ case gc.OAND:
+ switch lv {
+ case 0:
+ gins(i386.AMOVL, ncon(0), &lo2)
+
+ default:
+ gmove(&lo1, &lo2)
+ if lv != 0xffffffff {
+ gins(i386.AANDL, ncon(lv), &lo2)
+ }
+ }
+
+ switch hv {
+ case 0:
+ gins(i386.AMOVL, ncon(0), &hi2)
+
+ default:
+ gmove(&hi1, &hi2)
+ if hv != 0xffffffff {
+ gins(i386.AANDL, ncon(hv), &hi2)
+ }
+ }
+
+ case gc.OOR:
+ switch lv {
+ case 0:
+ gmove(&lo1, &lo2)
+
+ case 0xffffffff:
+ gins(i386.AMOVL, ncon(0xffffffff), &lo2)
+
+ default:
+ gmove(&lo1, &lo2)
+ gins(i386.AORL, ncon(lv), &lo2)
+ }
+
+ switch hv {
+ case 0:
+ gmove(&hi1, &hi2)
+
+ case 0xffffffff:
+ gins(i386.AMOVL, ncon(0xffffffff), &hi2)
+
+ default:
+ gmove(&hi1, &hi2)
+ gins(i386.AORL, ncon(hv), &hi2)
+ }
+ }
+
+ splitclean()
+ splitclean()
+ goto out
+ }
+
+ gins(i386.AMOVL, &lo1, &ax)
+ gins(i386.AMOVL, &hi1, &dx)
+ gins(optoas(int(n.Op), lo1.Type), &lo2, &ax)
+ gins(optoas(int(n.Op), lo1.Type), &hi2, &dx)
+ }
+
+ if gc.Is64(r.Type) != 0 {
+ splitclean()
+ }
+ splitclean()
+
+ split64(res, &lo1, &hi1)
+ gins(i386.AMOVL, &ax, &lo1)
+ gins(i386.AMOVL, &dx, &hi1)
+ splitclean()
+
+out:
+}
+
+/*
+ * generate comparison of nl, nr, both 64-bit.
+ * nl is memory; nr is constant or memory.
+ */
+func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
+ var lo1 gc.Node
+ var hi1 gc.Node
+ var lo2 gc.Node
+ var hi2 gc.Node
+ var rr gc.Node
+ var br *obj.Prog
+ var t *gc.Type
+
+ split64(nl, &lo1, &hi1)
+ split64(nr, &lo2, &hi2)
+
+ // compare most significant word;
+ // if they differ, we're done.
+ t = hi1.Type
+
+ if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
+ gins(i386.ACMPL, &hi1, &hi2)
+ } else {
+ regalloc(&rr, gc.Types[gc.TINT32], nil)
+ gins(i386.AMOVL, &hi1, &rr)
+ gins(i386.ACMPL, &rr, &hi2)
+ regfree(&rr)
+ }
+
+ br = nil
+ switch op {
+ default:
+ gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+ fallthrough
+
+ // cmp hi
+ // jne L
+ // cmp lo
+ // jeq to
+ // L:
+ case gc.OEQ:
+ br = gc.Gbranch(i386.AJNE, nil, -likely)
+
+ // cmp hi
+ // jne to
+ // cmp lo
+ // jne to
+ case gc.ONE:
+ gc.Patch(gc.Gbranch(i386.AJNE, nil, likely), to)
+
+ // cmp hi
+ // jgt to
+ // jlt L
+ // cmp lo
+ // jge to (or jgt to)
+ // L:
+ case gc.OGE,
+ gc.OGT:
+ gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
+
+ br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)
+
+ // cmp hi
+ // jlt to
+ // jgt L
+ // cmp lo
+ // jle to (or jlt to)
+ // L:
+ case gc.OLE,
+ gc.OLT:
+ gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
+
+ br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
+ }
+
+ // compare least significant word
+ t = lo1.Type
+
+ if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
+ gins(i386.ACMPL, &lo1, &lo2)
+ } else {
+ regalloc(&rr, gc.Types[gc.TINT32], nil)
+ gins(i386.AMOVL, &lo1, &rr)
+ gins(i386.ACMPL, &rr, &lo2)
+ regfree(&rr)
+ }
+
+ // jump again
+ gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)
+
+ // point first branch down here if appropriate
+ if br != nil {
+ gc.Patch(br, gc.Pc)
+ }
+
+ splitclean()
+ splitclean()
+}
diff --git a/src/cmd/new8g/galign.go b/src/cmd/new8g/galign.go
new file mode 100644
index 0000000000..a4f844dfff
--- /dev/null
+++ b/src/cmd/new8g/galign.go
@@ -0,0 +1,84 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+)
+
+var thechar int = '8'
+
+var thestring string = "386"
+
+var thelinkarch *obj.LinkArch = &i386.Link386
+
+func linkarchinit() {
+}
+
+var MAXWIDTH int64 = (1 << 32) - 1
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, and uintptr
+ */
+var typedefs = []gc.Typedef{
+ gc.Typedef{"int", gc.TINT, gc.TINT32},
+ gc.Typedef{"uint", gc.TUINT, gc.TUINT32},
+ gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT32},
+}
+
+func betypeinit() {
+ gc.Widthptr = 4
+ gc.Widthint = 4
+ gc.Widthreg = 4
+}
+
+func main() {
+ gc.Thearch.Thechar = thechar
+ gc.Thearch.Thestring = thestring
+ gc.Thearch.Thelinkarch = thelinkarch
+ gc.Thearch.Typedefs = typedefs
+ gc.Thearch.REGSP = i386.REGSP
+ gc.Thearch.REGCTXT = i386.REGCTXT
+ gc.Thearch.MAXWIDTH = MAXWIDTH
+ gc.Thearch.Anyregalloc = anyregalloc
+ gc.Thearch.Betypeinit = betypeinit
+ gc.Thearch.Bgen = bgen
+ gc.Thearch.Cgen = cgen
+ gc.Thearch.Cgen_call = cgen_call
+ gc.Thearch.Cgen_callinter = cgen_callinter
+ gc.Thearch.Cgen_ret = cgen_ret
+ gc.Thearch.Clearfat = clearfat
+ gc.Thearch.Defframe = defframe
+ gc.Thearch.Excise = excise
+ gc.Thearch.Expandchecks = expandchecks
+ gc.Thearch.Gclean = gclean
+ gc.Thearch.Ginit = ginit
+ gc.Thearch.Gins = gins
+ gc.Thearch.Ginscall = ginscall
+ gc.Thearch.Igen = igen
+ gc.Thearch.Linkarchinit = linkarchinit
+ gc.Thearch.Peep = peep
+ gc.Thearch.Proginfo = proginfo
+ gc.Thearch.Regalloc = regalloc
+ gc.Thearch.Regfree = regfree
+ gc.Thearch.Regtyp = regtyp
+ gc.Thearch.Sameaddr = sameaddr
+ gc.Thearch.Smallindir = smallindir
+ gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Excludedregs = excludedregs
+ gc.Thearch.RtoB = RtoB
+ gc.Thearch.FtoB = FtoB
+ gc.Thearch.BtoR = BtoR
+ gc.Thearch.BtoF = BtoF
+ gc.Thearch.Optoas = optoas
+ gc.Thearch.Doregbits = doregbits
+ gc.Thearch.Regnames = regnames
+
+ gc.Main()
+}
diff --git a/src/cmd/new8g/gg.go b/src/cmd/new8g/gg.go
new file mode 100644
index 0000000000..4aeff92952
--- /dev/null
+++ b/src/cmd/new8g/gg.go
@@ -0,0 +1,34 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "cmd/internal/obj/i386"
+import "cmd/internal/gc"
+
+// TODO(rsc):
+// assume CLD?
+
+// foptoas flags
+const (
+ Frev = 1 << 0
+ Fpop = 1 << 1
+ Fpop2 = 1 << 2
+)
+
+var reg [i386.MAXREG]uint8
+
+var panicdiv *gc.Node
+
diff --git a/src/cmd/new8g/ggen.go b/src/cmd/new8g/ggen.go
new file mode 100644
index 0000000000..8dd469c009
--- /dev/null
+++ b/src/cmd/new8g/ggen.go
@@ -0,0 +1,1297 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+)
+
+func defframe(ptxt *obj.Prog) {
+ var frame uint32
+ var ax uint32
+ var p *obj.Prog
+ var lo int64
+ var hi int64
+ var l *gc.NodeList
+ var n *gc.Node
+
+ // fill in argument size, stack size
+ ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+ ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+ frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+ ptxt.To.Offset = int64(frame)
+
+ // insert code to zero ambiguously live variables
+ // so that the garbage collector only sees initialized values
+ // when it looks for pointers.
+ p = ptxt
+
+ hi = 0
+ lo = hi
+ ax = 0
+ for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if !(n.Needzero != 0) {
+ continue
+ }
+ if n.Class != gc.PAUTO {
+ gc.Fatal("needzero class %d", n.Class)
+ }
+ if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+ gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ }
+ if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
+ // merge with range we already have
+ lo = n.Xoffset
+
+ continue
+ }
+
+ // zero old range
+ p = zerorange(p, int64(frame), lo, hi, &ax)
+
+ // set new range
+ hi = n.Xoffset + n.Type.Width
+
+ lo = n.Xoffset
+ }
+
+ // zero final range
+ zerorange(p, int64(frame), lo, hi, &ax)
+}
+
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
+ var cnt int64
+ var i int64
+
+ cnt = hi - lo
+ if cnt == 0 {
+ return p
+ }
+ if *ax == 0 {
+ p = appendpp(p, i386.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, i386.REG_AX, 0)
+ *ax = 1
+ }
+
+ if cnt <= int64(4*gc.Widthreg) {
+ for i = 0; i < cnt; i += int64(gc.Widthreg) {
+ p = appendpp(p, i386.AMOVL, obj.TYPE_REG, i386.REG_AX, 0, obj.TYPE_MEM, i386.REG_SP, frame+lo+i)
+ }
+ } else if !gc.Nacl && cnt <= int64(128*gc.Widthreg) {
+ p = appendpp(p, i386.ALEAL, obj.TYPE_MEM, i386.REG_SP, frame+lo, obj.TYPE_REG, i386.REG_DI, 0)
+ p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg)))
+ p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+ } else {
+ p = appendpp(p, i386.AMOVL, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, i386.REG_CX, 0)
+ p = appendpp(p, i386.ALEAL, obj.TYPE_MEM, i386.REG_SP, frame+lo, obj.TYPE_REG, i386.REG_DI, 0)
+ p = appendpp(p, i386.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = appendpp(p, i386.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ }
+
+ return p
+}
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+ var q *obj.Prog
+ q = gc.Ctxt.NewProg()
+ gc.Clearp(q)
+ q.As = int16(as)
+ q.Lineno = p.Lineno
+ q.From.Type = int16(ftype)
+ q.From.Reg = int16(freg)
+ q.From.Offset = foffset
+ q.To.Type = int16(ttype)
+ q.To.Reg = int16(treg)
+ q.To.Offset = toffset
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
+
+func clearfat(nl *gc.Node) {
+ var w uint32
+ var c uint32
+ var q uint32
+ var n1 gc.Node
+ var z gc.Node
+ var p *obj.Prog
+
+ /* clear a fat object */
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nclearfat", nl)
+ }
+
+ w = uint32(nl.Type.Width)
+
+ // Avoid taking the address for simple enough types.
+ if componentgen(nil, nl) != 0 {
+ return
+ }
+
+ c = w % 4 // bytes
+ q = w / 4 // quads
+
+ if q < 4 {
+ // Write sequence of MOV 0, off(base) instead of using STOSL.
+ // The hope is that although the code will be slightly longer,
+ // the MOVs will have no dependencies and pipeline better
+ // than the unrolled STOSL loop.
+ // NOTE: Must use agen, not igen, so that optimizer sees address
+ // being taken. We are not writing on field boundaries.
+ regalloc(&n1, gc.Types[gc.Tptr], nil)
+
+ agen(nl, &n1)
+ n1.Op = gc.OINDREG
+ gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
+ for ; q > 0; q-- {
+ n1.Type = z.Type
+ gins(i386.AMOVL, &z, &n1)
+ n1.Xoffset += 4
+ }
+
+ gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
+ for ; c > 0; c-- {
+ n1.Type = z.Type
+ gins(i386.AMOVB, &z, &n1)
+ n1.Xoffset++
+ }
+
+ regfree(&n1)
+ return
+ }
+
+ gc.Nodreg(&n1, gc.Types[gc.Tptr], i386.REG_DI)
+ agen(nl, &n1)
+ gconreg(i386.AMOVL, 0, i386.REG_AX)
+
+ if q > 128 || (q >= 4 && gc.Nacl) {
+ gconreg(i386.AMOVL, int64(q), i386.REG_CX)
+ gins(i386.AREP, nil, nil) // repeat
+ gins(i386.ASTOSL, nil, nil) // STOL AL,*(DI)+
+ } else if q >= 4 {
+ p = gins(obj.ADUFFZERO, nil, nil)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+
+ // 1 and 128 = magic constants: see ../../runtime/asm_386.s
+ p.To.Offset = 1 * (128 - int64(q))
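+ // (The entry offset skips 128-q of duffzero's 128 one-byte STOSL
+ // instructions, so exactly q 4-byte words remain to be zeroed.)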
+ } else {
+ for q > 0 {
+ gins(i386.ASTOSL, nil, nil) // STOL AL,*(DI)+
+ q--
+ }
+ }
+
+ for c > 0 {
+ gins(i386.ASTOSB, nil, nil) // STOB AL,*(DI)+
+ c--
+ }
+}
+
+/*
+ * generate:
+ * call f
+ * proc=-1 normal call but no return
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ * proc=3 normal call to C pointer (not Go func value)
+ */
+func ginscall(f *gc.Node, proc int) {
+ var p *obj.Prog
+ var reg gc.Node
+ var r1 gc.Node
+ var con gc.Node
+ var stk gc.Node
+ var extra int32
+
+ if f.Type != nil {
+ extra = 0
+ if proc == 1 || proc == 2 {
+ extra = 2 * int32(gc.Widthptr)
+ }
+ gc.Setmaxarg(f.Type, extra)
+ }
+
+ switch proc {
+ default:
+ gc.Fatal("ginscall: bad proc %d", proc)
+
+ case 0, // normal call
+ -1: // normal call but no return
+ if f.Op == gc.ONAME && f.Class == gc.PFUNC {
+ if f == gc.Deferreturn {
+ // Deferred calls will appear to be returning to
+ // the CALL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction byte before the return PC.
+ // To avoid that being an unrelated instruction,
+ // insert an x86 NOP so that we will have the right line number.
+ // x86 NOP 0x90 is really XCHG AX, AX; use that description
+ // because the NOP pseudo-instruction will be removed by
+ // the linker.
+ gc.Nodreg(&reg, gc.Types[gc.TINT], i386.REG_AX)
+
+ gins(i386.AXCHGL, &reg, &reg)
+ }
+
+ p = gins(obj.ACALL, nil, f)
+ gc.Afunclit(&p.To, f)
+ if proc == -1 || gc.Noreturn(p) != 0 {
+ gins(obj.AUNDEF, nil, nil)
+ }
+ break
+ }
+
+ gc.Nodreg(&reg, gc.Types[gc.Tptr], i386.REG_DX)
+ gc.Nodreg(&r1, gc.Types[gc.Tptr], i386.REG_BX)
+ gmove(f, &reg)
+ reg.Op = gc.OINDREG
+ gmove(&reg, &r1)
+ reg.Op = gc.OREGISTER
+ gins(obj.ACALL, &reg, &r1)
+
+ case 3: // normal call of c function pointer
+ gins(obj.ACALL, nil, f)
+
+ case 1, // call in new proc (go)
+ 2: // deferred call (defer)
+ stk = gc.Node{}
+
+ stk.Op = gc.OINDREG
+ stk.Val.U.Reg = i386.REG_SP
+ stk.Xoffset = 0
+
+ // size of arguments at 0(SP)
+ gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
+
+ gins(i386.AMOVL, &con, &stk)
+
+ // FuncVal* at 4(SP)
+ stk.Xoffset = int64(gc.Widthptr)
+
+ gins(i386.AMOVL, f, &stk)
+
+ if proc == 1 {
+ ginscall(gc.Newproc, 0)
+ } else {
+ ginscall(gc.Deferproc, 0)
+ }
+ if proc == 2 {
+ gc.Nodreg(&reg, gc.Types[gc.TINT32], i386.REG_AX)
+ gins(i386.ATESTL, &reg, &reg)
+ p = gc.Gbranch(i386.AJEQ, nil, +1)
+ cgen_ret(nil)
+ gc.Patch(p, gc.Pc)
+ }
+ }
+}
+
+/*
+ * n is call to interface method.
+ * generate res = n.
+ */
+func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
+ var i *gc.Node
+ var f *gc.Node
+ var tmpi gc.Node
+ var nodi gc.Node
+ var nodo gc.Node
+ var nodr gc.Node
+ var nodsp gc.Node
+
+ i = n.Left
+ if i.Op != gc.ODOTINTER {
+ gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
+ }
+
+ f = i.Right // field
+ if f.Op != gc.ONAME {
+ gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
+ }
+
+ i = i.Left // interface
+
+ if !(i.Addable != 0) {
+ gc.Tempname(&tmpi, i.Type)
+ cgen(i, &tmpi)
+ i = &tmpi
+ }
+
+ gc.Genlist(n.List) // assign the args
+
+ // i is now addable, prepare an indirected
+ // register to hold its address.
+ igen(i, &nodi, res) // REG = &inter
+
+ gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], i386.REG_SP)
+
+ nodsp.Xoffset = 0
+ if proc != 0 {
+ nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
+ }
+ nodi.Type = gc.Types[gc.Tptr]
+ nodi.Xoffset += int64(gc.Widthptr)
+ cgen(&nodi, &nodsp) // {0 or 8}(SP) = 4(REG) -- i.data
+
+ regalloc(&nodo, gc.Types[gc.Tptr], res)
+
+ nodi.Type = gc.Types[gc.Tptr]
+ nodi.Xoffset -= int64(gc.Widthptr)
+ cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
+ regfree(&nodi)
+
+ regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
+ if n.Left.Xoffset == gc.BADWIDTH {
+ gc.Fatal("cgen_callinter: badwidth")
+ }
+ gc.Cgen_checknil(&nodo)
+ nodo.Op = gc.OINDREG
+ nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
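+ // 3*Widthptr+8 is the offset of fun[0] in the itab: three pointer
+ // fields followed by two int32s, 20 bytes on 386.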
+
+ if proc == 0 {
+ // plain call: use direct c function pointer - more efficient
+ cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f]
+ proc = 3
+ } else {
+ // go/defer. generate go func value.
+ gins(i386.ALEAL, &nodo, &nodr) // REG = &(20+offset(REG)) -- i.tab->fun[f]
+ }
+
+ nodr.Type = n.Left.Type
+ ginscall(&nodr, proc)
+
+ regfree(&nodr)
+ regfree(&nodo)
+}
+
+/*
+ * generate function call;
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ */
+func cgen_call(n *gc.Node, proc int) {
+ var t *gc.Type
+ var nod gc.Node
+ var afun gc.Node
+
+ if n == nil {
+ return
+ }
+
+ if n.Left.Ullman >= gc.UINF {
+ // if name involves a fn call
+ // precompute the address of the fn
+ gc.Tempname(&afun, gc.Types[gc.Tptr])
+
+ cgen(n.Left, &afun)
+ }
+
+ gc.Genlist(n.List) // assign the args
+ t = n.Left.Type
+
+ // call tempname pointer
+ if n.Left.Ullman >= gc.UINF {
+ regalloc(&nod, gc.Types[gc.Tptr], nil)
+ gc.Cgen_as(&nod, &afun)
+ nod.Type = t
+ ginscall(&nod, proc)
+ regfree(&nod)
+ return
+ }
+
+ // call pointer
+ if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+ regalloc(&nod, gc.Types[gc.Tptr], nil)
+ gc.Cgen_as(&nod, n.Left)
+ nod.Type = t
+ ginscall(&nod, proc)
+ regfree(&nod)
+ return
+ }
+
+ // call direct
+ n.Left.Method = 1
+
+ ginscall(n.Left, proc)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = return value from call.
+ */
+func cgen_callret(n *gc.Node, res *gc.Node) {
+ var nod gc.Node
+ var fp *gc.Type
+ var t *gc.Type
+ var flist gc.Iter
+
+ t = n.Left.Type
+ if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
+ t = t.Type
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if fp == nil {
+ gc.Fatal("cgen_callret: nil")
+ }
+
+ nod = gc.Node{}
+ nod.Op = gc.OINDREG
+ nod.Val.U.Reg = i386.REG_SP
+ nod.Addable = 1
+
+ nod.Xoffset = fp.Width
+ nod.Type = fp.Type
+ gc.Cgen_as(res, &nod)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = &return value from call.
+ */
+func cgen_aret(n *gc.Node, res *gc.Node) {
+ var nod1 gc.Node
+ var nod2 gc.Node
+ var fp *gc.Type
+ var t *gc.Type
+ var flist gc.Iter
+
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if fp == nil {
+ gc.Fatal("cgen_aret: nil")
+ }
+
+ nod1 = gc.Node{}
+ nod1.Op = gc.OINDREG
+ nod1.Val.U.Reg = i386.REG_SP
+ nod1.Addable = 1
+
+ nod1.Xoffset = fp.Width
+ nod1.Type = fp.Type
+
+ if res.Op != gc.OREGISTER {
+ regalloc(&nod2, gc.Types[gc.Tptr], res)
+ gins(i386.ALEAL, &nod1, &nod2)
+ gins(i386.AMOVL, &nod2, res)
+ regfree(&nod2)
+ } else {
+ gins(i386.ALEAL, &nod1, res)
+ }
+}
+
+/*
+ * generate return.
+ * n->left is assignments to return values.
+ */
+func cgen_ret(n *gc.Node) {
+ var p *obj.Prog
+
+ if n != nil {
+ gc.Genlist(n.List) // copy out args
+ }
+ if gc.Hasdefer != 0 {
+ ginscall(gc.Deferreturn, 0)
+ }
+ gc.Genlist(gc.Curfn.Exit)
+ p = gins(obj.ARET, nil, nil)
+ if n != nil && n.Op == gc.ORETJMP {
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(n.Left.Sym)
+ }
+}
+
+/*
+ * generate division.
+ * caller must set:
+ * ax = allocated AX register
+ * dx = allocated DX register
+ * generates one of:
+ * res = nl / nr
+ * res = nl % nr
+ * according to op.
+ */
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
+ var check int
+ var n1 gc.Node
+ var t1 gc.Node
+ var t2 gc.Node
+ var t3 gc.Node
+ var t4 gc.Node
+ var n4 gc.Node
+ var nz gc.Node
+ var t *gc.Type
+ var t0 *gc.Type
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ // Have to be careful about handling
+ // most negative int divided by -1 correctly.
+ // The hardware will trap.
+ // Also the byte divide instruction needs AH,
+ // which we otherwise don't have to deal with.
+ // Easiest way to avoid for int8, int16: use int32.
+ // For int32 and int64, use explicit test.
+ // Could use int64 hw for int32.
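+ // For example, with int32 operands -2147483648 / -1 traps because
+ // the quotient 2147483648 does not fit; the check below turns
+ // x / -1 into -x and x % -1 into 0 instead.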
+ t = nl.Type
+
+ t0 = t
+ check = 0
+ if gc.Issigned[t.Etype] != 0 {
+ check = 1
+ if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
+ check = 0
+ } else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+ check = 0
+ }
+ }
+
+ if t.Width < 4 {
+ if gc.Issigned[t.Etype] != 0 {
+ t = gc.Types[gc.TINT32]
+ } else {
+ t = gc.Types[gc.TUINT32]
+ }
+ check = 0
+ }
+
+ gc.Tempname(&t1, t)
+ gc.Tempname(&t2, t)
+ if t0 != t {
+ gc.Tempname(&t3, t0)
+ gc.Tempname(&t4, t0)
+ cgen(nl, &t3)
+ cgen(nr, &t4)
+
+ // Convert.
+ gmove(&t3, &t1)
+
+ gmove(&t4, &t2)
+ } else {
+ cgen(nl, &t1)
+ cgen(nr, &t2)
+ }
+
+ if !(gc.Samereg(ax, res) != 0) && !(gc.Samereg(dx, res) != 0) {
+ regalloc(&n1, t, res)
+ } else {
+ regalloc(&n1, t, nil)
+ }
+ gmove(&t2, &n1)
+ gmove(&t1, ax)
+ p2 = nil
+ if gc.Nacl {
+ // Native Client does not relay the divide-by-zero trap
+ // to the executing program, so we must insert a check
+ // for ourselves.
+ gc.Nodconst(&n4, t, 0)
+
+ gins(optoas(gc.OCMP, t), &n1, &n4)
+ p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ if panicdiv == nil {
+ panicdiv = gc.Sysfunc("panicdivide")
+ }
+ ginscall(panicdiv, -1)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if check != 0 {
+ gc.Nodconst(&n4, t, -1)
+ gins(optoas(gc.OCMP, t), &n1, &n4)
+ p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ if op == gc.ODIV {
+ // a / (-1) is -a.
+ gins(optoas(gc.OMINUS, t), nil, ax)
+
+ gmove(ax, res)
+ } else {
+ // a % (-1) is 0.
+ gc.Nodconst(&n4, t, 0)
+
+ gmove(&n4, res)
+ }
+
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if !(gc.Issigned[t.Etype] != 0) {
+ gc.Nodconst(&nz, t, 0)
+ gmove(&nz, dx)
+ } else {
+ gins(optoas(gc.OEXTEND, t), nil, nil)
+ }
+ gins(optoas(op, t), &n1, nil)
+ regfree(&n1)
+
+ if op == gc.ODIV {
+ gmove(ax, res)
+ } else {
+ gmove(dx, res)
+ }
+ if check != 0 {
+ gc.Patch(p2, gc.Pc)
+ }
+}
+
+func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
+ var r int
+
+ r = int(reg[dr])
+ gc.Nodreg(x, gc.Types[gc.TINT32], dr)
+
+ // save current ax and dx if they are live
+ // and not the destination
+ *oldx = gc.Node{}
+
+ if r > 0 && !(gc.Samereg(x, res) != 0) {
+ gc.Tempname(oldx, gc.Types[gc.TINT32])
+ gmove(x, oldx)
+ }
+
+ regalloc(x, t, x)
+}
+
+func restx(x *gc.Node, oldx *gc.Node) {
+ regfree(x)
+
+ if oldx.Op != 0 {
+ x.Type = gc.Types[gc.TINT32]
+ gmove(oldx, x)
+ }
+}
+
+/*
+ * generate division according to op, one of:
+ * res = nl / nr
+ * res = nl % nr
+ */
+func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var ax gc.Node
+ var dx gc.Node
+ var oldax gc.Node
+ var olddx gc.Node
+ var t *gc.Type
+
+ if gc.Is64(nl.Type) != 0 {
+ gc.Fatal("cgen_div %v", gc.Tconv(nl.Type, 0))
+ }
+
+ if gc.Issigned[nl.Type.Etype] != 0 {
+ t = gc.Types[gc.TINT32]
+ } else {
+ t = gc.Types[gc.TUINT32]
+ }
+ savex(i386.REG_AX, &ax, &oldax, res, t)
+ savex(i386.REG_DX, &dx, &olddx, res, t)
+ dodiv(op, nl, nr, res, &ax, &dx)
+ restx(&dx, &olddx)
+ restx(&ax, &oldax)
+}
+
+/*
+ * generate shift according to op, one of:
+ * res = nl << nr
+ * res = nl >> nr
+ */
+func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+ var n2 gc.Node
+ var nt gc.Node
+ var cx gc.Node
+ var oldcx gc.Node
+ var hi gc.Node
+ var lo gc.Node
+ var a int
+ var w int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var sc uint64
+
+ if nl.Type.Width > 4 {
+ gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
+ }
+
+ w = int(nl.Type.Width * 8)
+
+ a = optoas(op, nl.Type)
+
+ if nr.Op == gc.OLITERAL {
+ gc.Tempname(&n2, nl.Type)
+ cgen(nl, &n2)
+ regalloc(&n1, nl.Type, res)
+ gmove(&n2, &n1)
+ sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ if sc >= uint64(nl.Type.Width*8) {
+ // large shift gets 2 shifts by width-1
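+ // (logical shifts produce 0 and arithmetic right shifts fill with
+ // the sign bit, matching Go's semantics for oversized shift counts)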
+ gins(a, ncon(uint32(w)-1), &n1)
+
+ gins(a, ncon(uint32(w)-1), &n1)
+ } else {
+ gins(a, nr, &n1)
+ }
+ gmove(&n1, res)
+ regfree(&n1)
+ return
+ }
+
+ oldcx = gc.Node{}
+ gc.Nodreg(&cx, gc.Types[gc.TUINT32], i386.REG_CX)
+ if reg[i386.REG_CX] > 1 && !(gc.Samereg(&cx, res) != 0) {
+ gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
+ gmove(&cx, &oldcx)
+ }
+
+ if nr.Type.Width > 4 {
+ gc.Tempname(&nt, nr.Type)
+ n1 = nt
+ } else {
+ gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
+ regalloc(&n1, nr.Type, &n1) // to hold the shift count in CX
+ }
+
+ if gc.Samereg(&cx, res) != 0 {
+ regalloc(&n2, nl.Type, nil)
+ } else {
+ regalloc(&n2, nl.Type, res)
+ }
+ if nl.Ullman >= nr.Ullman {
+ cgen(nl, &n2)
+ cgen(nr, &n1)
+ } else {
+ cgen(nr, &n1)
+ cgen(nl, &n2)
+ }
+
+ // test and fix up large shifts
+ if bounded != 0 {
+ if nr.Type.Width > 4 {
+ // delayed reg alloc
+ gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
+
+ regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift count in CX
+ split64(&nt, &lo, &hi)
+ gmove(&lo, &n1)
+ splitclean()
+ }
+ } else {
+ if nr.Type.Width > 4 {
+ // delayed reg alloc
+ gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
+
+ regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift count in CX
+ split64(&nt, &lo, &hi)
+ gmove(&lo, &n1)
+ gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0))
+ p2 = gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
+ gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w)))
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ splitclean()
+ gc.Patch(p2, gc.Pc)
+ } else {
+ gins(optoas(gc.OCMP, nr.Type), &n1, ncon(uint32(w)))
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ }
+
+ if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
+ gins(a, ncon(uint32(w)-1), &n2)
+ } else {
+ gmove(ncon(0), &n2)
+ }
+
+ gc.Patch(p1, gc.Pc)
+ }
+
+ gins(a, &n1, &n2)
+
+ if oldcx.Op != 0 {
+ gmove(&oldcx, &cx)
+ }
+
+ gmove(&n2, res)
+
+ regfree(&n1)
+ regfree(&n2)
+}
+
+/*
+ * generate byte multiply:
+ * res = nl * nr
+ * there is no 2-operand byte multiply instruction so
+ * we do a full-width multiplication and truncate afterwards.
+ */
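+// This is sound because only the low byte of the result is needed:
+// widening both operands, multiplying, and truncating gives the same
+// low byte as a true byte multiply would.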
+func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+ var n2 gc.Node
+ var nt gc.Node
+ var tmp *gc.Node
+ var t *gc.Type
+ var a int
+
+ // copy from byte to full registers
+ t = gc.Types[gc.TUINT32]
+
+ if gc.Issigned[nl.Type.Etype] != 0 {
+ t = gc.Types[gc.TINT32]
+ }
+
+ // largest ullman on left.
+ if nl.Ullman < nr.Ullman {
+ tmp = nl
+ nl = nr
+ nr = tmp
+ }
+
+ gc.Tempname(&nt, nl.Type)
+ cgen(nl, &nt)
+ regalloc(&n1, t, res)
+ cgen(nr, &n1)
+ regalloc(&n2, t, nil)
+ gmove(&nt, &n2)
+ a = optoas(op, t)
+ gins(a, &n2, &n1)
+ regfree(&n2)
+ gmove(&n1, res)
+ regfree(&n1)
+}
+
+/*
+ * generate high multiply:
+ * res = (nl*nr) >> width
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var t *gc.Type
+ var a int
+ var n1 gc.Node
+ var n2 gc.Node
+ var ax gc.Node
+ var dx gc.Node
+
+ t = nl.Type
+ a = optoas(gc.OHMUL, t)
+
+ // gen nl in n1.
+ gc.Tempname(&n1, t)
+
+ cgen(nl, &n1)
+
+ // gen nr in n2.
+ regalloc(&n2, t, res)
+
+ cgen(nr, &n2)
+
+ // multiply.
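+ // (the one-operand form leaves the high half of the product in DX,
+ // or in AH for 8-bit operands)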
+ gc.Nodreg(&ax, t, i386.REG_AX)
+
+ gmove(&n2, &ax)
+ gins(a, &n1, nil)
+ regfree(&n2)
+
+ if t.Width == 1 {
+ // byte multiply behaves differently.
+ gc.Nodreg(&ax, t, i386.REG_AH)
+
+ gc.Nodreg(&dx, t, i386.REG_DX)
+ gmove(&ax, &dx)
+ }
+
+ gc.Nodreg(&dx, t, i386.REG_DX)
+ gmove(&dx, res)
+}
+
+/*
+ * generate floating-point operation.
+ */
+func cgen_float(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+
+ nl = n.Left
+ switch n.Op {
+ case gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OLE,
+ gc.OGE:
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 = gc.Pc
+ gmove(gc.Nodbool(1), res)
+ p3 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n, true, 0, p2)
+ gmove(gc.Nodbool(0), res)
+ gc.Patch(p3, gc.Pc)
+ return
+
+ case gc.OPLUS:
+ cgen(nl, res)
+ return
+
+ case gc.OCONV:
+ if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
+ cgen(nl, res)
+ return
+ }
+
+ gc.Tempname(&n2, n.Type)
+ mgen(nl, &n1, res)
+ gmove(&n1, &n2)
+ gmove(&n2, res)
+ mfree(&n1)
+ return
+ }
+
+ if gc.Use_sse != 0 {
+ cgen_floatsse(n, res)
+ } else {
+ cgen_float387(n, res)
+ }
+}
+
+// floating-point. 387 (not SSE2)
+func cgen_float387(n *gc.Node, res *gc.Node) {
+ var f0 gc.Node
+ var f1 gc.Node
+ var nl *gc.Node
+ var nr *gc.Node
+
+ nl = n.Left
+ nr = n.Right
+ gc.Nodreg(&f0, nl.Type, i386.REG_F0)
+ gc.Nodreg(&f1, n.Type, i386.REG_F0+1)
+ if nr != nil {
+ goto flt2
+ }
+
+ // unary
+ cgen(nl, &f0)
+
+ if n.Op != gc.OCONV && n.Op != gc.OPLUS {
+ gins(foptoas(int(n.Op), n.Type, 0), nil, nil)
+ }
+ gmove(&f0, res)
+ return
+
+flt2: // binary
+ if nl.Ullman >= nr.Ullman {
+ cgen(nl, &f0)
+ if nr.Addable != 0 {
+ gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
+ } else {
+ cgen(nr, &f0)
+ gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
+ }
+ } else {
+ cgen(nr, &f0)
+ if nl.Addable != 0 {
+ gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
+ } else {
+ cgen(nl, &f0)
+ gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
+ }
+ }
+
+ gmove(&f0, res)
+ return
+}
+
+func cgen_floatsse(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var nt gc.Node
+ var a int
+
+ nl = n.Left
+ nr = n.Right
+ switch n.Op {
+ default:
+ gc.Dump("cgen_floatsse", n)
+ gc.Fatal("cgen_floatsse %v", gc.Oconv(int(n.Op), 0))
+ return
+
+ case gc.OMINUS,
+ gc.OCOM:
+ nr = gc.Nodintconst(-1)
+ gc.Convlit(&nr, n.Type)
+ a = foptoas(gc.OMUL, nl.Type, 0)
+ goto sbop
+
+ // symmetric binary
+ case gc.OADD,
+ gc.OMUL:
+ a = foptoas(int(n.Op), nl.Type, 0)
+
+ goto sbop
+
+ // asymmetric binary
+ case gc.OSUB,
+ gc.OMOD,
+ gc.ODIV:
+ a = foptoas(int(n.Op), nl.Type, 0)
+
+ goto abop
+ }
+
+sbop: // symmetric binary
+ if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+abop: // asymmetric binary
+ if nl.Ullman >= nr.Ullman {
+ gc.Tempname(&nt, nl.Type)
+ cgen(nl, &nt)
+ mgen(nr, &n2, nil)
+ regalloc(&n1, nl.Type, res)
+ gmove(&nt, &n1)
+ gins(a, &n2, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ mfree(&n2)
+ } else {
+ regalloc(&n2, nr.Type, res)
+ cgen(nr, &n2)
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ gins(a, &n2, &n1)
+ regfree(&n2)
+ gmove(&n1, res)
+ regfree(&n1)
+ }
+
+ return
+}
+
+func bgen_float(n *gc.Node, true_ int, likely int, to *obj.Prog) {
+ var et int
+ var a int
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var tmp gc.Node
+ var t1 gc.Node
+ var t2 gc.Node
+ var ax gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ nl = n.Left
+ nr = n.Right
+ a = int(n.Op)
+ if !(true_ != 0) {
+ // brcom is not valid on floats when NaN is involved.
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+
+ // No need to avoid re-genning ninit.
+ bgen_float(n, 1, -likely, p2)
+
+ gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+ gc.Patch(p2, gc.Pc)
+ return
+ }
+
+ if gc.Use_sse != 0 {
+ goto sse
+ } else {
+ goto x87
+ }
+
+x87:
+ a = gc.Brrev(a) // because the args are stacked
+ if a == gc.OGE || a == gc.OGT {
+ // only < and <= work right with NaN; reverse if needed
+ r = nr
+
+ nr = nl
+ nl = r
+ a = gc.Brrev(a)
+ }
+
+ gc.Nodreg(&tmp, nr.Type, i386.REG_F0)
+ gc.Nodreg(&n2, nr.Type, i386.REG_F0+1)
+ gc.Nodreg(&ax, gc.Types[gc.TUINT16], i386.REG_AX)
+ et = gc.Simsimtype(nr.Type)
+ if et == gc.TFLOAT64 {
+ if nl.Ullman > nr.Ullman {
+ cgen(nl, &tmp)
+ cgen(nr, &tmp)
+ gins(i386.AFXCHD, &tmp, &n2)
+ } else {
+ cgen(nr, &tmp)
+ cgen(nl, &tmp)
+ }
+
+ gins(i386.AFUCOMIP, &tmp, &n2)
+ gins(i386.AFMOVDP, &tmp, &tmp) // annoying pop but still better than STSW+SAHF
+ } else {
+ // TODO(rsc): The moves back and forth to memory
+ // here are for truncating the value to 32 bits.
+ // This handles 32-bit comparison but presumably
+ // all the other ops have the same problem.
+ // We need to figure out what the right general
+ // solution is, besides telling people to use float64.
+ gc.Tempname(&t1, gc.Types[gc.TFLOAT32])
+
+ gc.Tempname(&t2, gc.Types[gc.TFLOAT32])
+ cgen(nr, &t1)
+ cgen(nl, &t2)
+ gmove(&t2, &tmp)
+ gins(i386.AFCOMFP, &t1, &tmp)
+ gins(i386.AFSTSW, nil, &ax)
+ gins(i386.ASAHF, nil, nil)
+ }
+
+ goto ret
+
+sse:
+ if !(nl.Addable != 0) {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ nl = &n1
+ }
+
+ if !(nr.Addable != 0) {
+ gc.Tempname(&tmp, nr.Type)
+ cgen(nr, &tmp)
+ nr = &tmp
+ }
+
+ regalloc(&n2, nr.Type, nil)
+ gmove(nr, &n2)
+ nr = &n2
+
+ if nl.Op != gc.OREGISTER {
+ regalloc(&n3, nl.Type, nil)
+ gmove(nl, &n3)
+ nl = &n3
+ }
+
+ if a == gc.OGE || a == gc.OGT {
+ // only < and <= work right with NaN; reverse if needed
+ r = nr
+
+ nr = nl
+ nl = r
+ a = gc.Brrev(a)
+ }
+
+ gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
+ if nl.Op == gc.OREGISTER {
+ regfree(nl)
+ }
+ regfree(nr)
+
+ret:
+ if a == gc.OEQ {
+ // neither NE nor P
+ p1 = gc.Gbranch(i386.AJNE, nil, -likely)
+
+ p2 = gc.Gbranch(i386.AJPS, nil, -likely)
+ gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+ gc.Patch(p1, gc.Pc)
+ gc.Patch(p2, gc.Pc)
+ } else if a == gc.ONE {
+ // either NE or P
+ gc.Patch(gc.Gbranch(i386.AJNE, nil, likely), to)
+
+ gc.Patch(gc.Gbranch(i386.AJPS, nil, likely), to)
+ } else {
+ gc.Patch(gc.Gbranch(optoas(a, nr.Type), nil, likely), to)
+ }
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ for p = firstp; p != nil; p = p.Link {
+ if p.As != obj.ACHECKNIL {
+ continue
+ }
+ if gc.Debug_checknil != 0 && p.Lineno > 1 { // p.Lineno==1 in generated wrappers
+ gc.Warnl(int(p.Lineno), "generated nil check")
+ }
+
+ // check is
+ // CMP arg, $0
+ // JNE 2(PC) (likely)
+ // MOV AX, 0
+ p1 = gc.Ctxt.NewProg()
+
+ p2 = gc.Ctxt.NewProg()
+ gc.Clearp(p1)
+ gc.Clearp(p2)
+ p1.Link = p2
+ p2.Link = p.Link
+ p.Link = p1
+ p1.Lineno = p.Lineno
+ p2.Lineno = p.Lineno
+ p1.Pc = 9999
+ p2.Pc = 9999
+ p.As = i386.ACMPL
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = 0
+ p1.As = i386.AJNE
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = 1 // likely
+ p1.To.Type = obj.TYPE_BRANCH
+ p1.To.U.Branch = p2.Link
+
+ // crash by write to memory address 0.
+ // if possible, since we know arg is 0, use 0(arg),
+ // which will be shorter to encode than plain 0.
+ p2.As = i386.AMOVL
+
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = i386.REG_AX
+ if regtyp(&p.From) != 0 {
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = p.From.Reg
+ } else {
+ p2.To.Type = obj.TYPE_MEM
+ }
+ p2.To.Offset = 0
+ }
+}
diff --git a/src/cmd/new8g/gsubr.go b/src/cmd/new8g/gsubr.go
new file mode 100644
index 0000000000..eaf0896903
--- /dev/null
+++ b/src/cmd/new8g/gsubr.go
@@ -0,0 +1,1933 @@
+// Derived from Inferno utils/8c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+// TODO(rsc): Can make this bigger if we move
+// the text segment up higher in 8l for all GOOS.
+// At the same time, can raise StackBig in ../../runtime/stack.h.
+var unmappedzero uint32 = 4096
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
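+// For example, optoas(gc.OADD, gc.Types[gc.TINT32]) selects i386.AADDL,
+// and optoas(gc.OEQ, gc.Types[gc.TINT32]) selects the conditional jump
+// i386.AJEQ.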
+func optoas(op int, t *gc.Type) int {
+ var a int
+
+ if t == nil {
+ gc.Fatal("optoas: t is nil")
+ }
+
+ a = obj.AXXX
+ switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+ default:
+ gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+ case gc.OADDR<<16 | gc.TPTR32:
+ a = i386.ALEAL
+
+ case gc.OEQ<<16 | gc.TBOOL,
+ gc.OEQ<<16 | gc.TINT8,
+ gc.OEQ<<16 | gc.TUINT8,
+ gc.OEQ<<16 | gc.TINT16,
+ gc.OEQ<<16 | gc.TUINT16,
+ gc.OEQ<<16 | gc.TINT32,
+ gc.OEQ<<16 | gc.TUINT32,
+ gc.OEQ<<16 | gc.TINT64,
+ gc.OEQ<<16 | gc.TUINT64,
+ gc.OEQ<<16 | gc.TPTR32,
+ gc.OEQ<<16 | gc.TPTR64,
+ gc.OEQ<<16 | gc.TFLOAT32,
+ gc.OEQ<<16 | gc.TFLOAT64:
+ a = i386.AJEQ
+
+ case gc.ONE<<16 | gc.TBOOL,
+ gc.ONE<<16 | gc.TINT8,
+ gc.ONE<<16 | gc.TUINT8,
+ gc.ONE<<16 | gc.TINT16,
+ gc.ONE<<16 | gc.TUINT16,
+ gc.ONE<<16 | gc.TINT32,
+ gc.ONE<<16 | gc.TUINT32,
+ gc.ONE<<16 | gc.TINT64,
+ gc.ONE<<16 | gc.TUINT64,
+ gc.ONE<<16 | gc.TPTR32,
+ gc.ONE<<16 | gc.TPTR64,
+ gc.ONE<<16 | gc.TFLOAT32,
+ gc.ONE<<16 | gc.TFLOAT64:
+ a = i386.AJNE
+
+ case gc.OLT<<16 | gc.TINT8,
+ gc.OLT<<16 | gc.TINT16,
+ gc.OLT<<16 | gc.TINT32,
+ gc.OLT<<16 | gc.TINT64:
+ a = i386.AJLT
+
+ case gc.OLT<<16 | gc.TUINT8,
+ gc.OLT<<16 | gc.TUINT16,
+ gc.OLT<<16 | gc.TUINT32,
+ gc.OLT<<16 | gc.TUINT64:
+ a = i386.AJCS
+
+ case gc.OLE<<16 | gc.TINT8,
+ gc.OLE<<16 | gc.TINT16,
+ gc.OLE<<16 | gc.TINT32,
+ gc.OLE<<16 | gc.TINT64:
+ a = i386.AJLE
+
+ case gc.OLE<<16 | gc.TUINT8,
+ gc.OLE<<16 | gc.TUINT16,
+ gc.OLE<<16 | gc.TUINT32,
+ gc.OLE<<16 | gc.TUINT64:
+ a = i386.AJLS
+
+ case gc.OGT<<16 | gc.TINT8,
+ gc.OGT<<16 | gc.TINT16,
+ gc.OGT<<16 | gc.TINT32,
+ gc.OGT<<16 | gc.TINT64:
+ a = i386.AJGT
+
+ case gc.OGT<<16 | gc.TUINT8,
+ gc.OGT<<16 | gc.TUINT16,
+ gc.OGT<<16 | gc.TUINT32,
+ gc.OGT<<16 | gc.TUINT64,
+ gc.OLT<<16 | gc.TFLOAT32,
+ gc.OLT<<16 | gc.TFLOAT64:
+ a = i386.AJHI
+
+ case gc.OGE<<16 | gc.TINT8,
+ gc.OGE<<16 | gc.TINT16,
+ gc.OGE<<16 | gc.TINT32,
+ gc.OGE<<16 | gc.TINT64:
+ a = i386.AJGE
+
+ case gc.OGE<<16 | gc.TUINT8,
+ gc.OGE<<16 | gc.TUINT16,
+ gc.OGE<<16 | gc.TUINT32,
+ gc.OGE<<16 | gc.TUINT64,
+ gc.OLE<<16 | gc.TFLOAT32,
+ gc.OLE<<16 | gc.TFLOAT64:
+ a = i386.AJCC
+
+ case gc.OCMP<<16 | gc.TBOOL,
+ gc.OCMP<<16 | gc.TINT8,
+ gc.OCMP<<16 | gc.TUINT8:
+ a = i386.ACMPB
+
+ case gc.OCMP<<16 | gc.TINT16,
+ gc.OCMP<<16 | gc.TUINT16:
+ a = i386.ACMPW
+
+ case gc.OCMP<<16 | gc.TINT32,
+ gc.OCMP<<16 | gc.TUINT32,
+ gc.OCMP<<16 | gc.TPTR32:
+ a = i386.ACMPL
+
+ case gc.OAS<<16 | gc.TBOOL,
+ gc.OAS<<16 | gc.TINT8,
+ gc.OAS<<16 | gc.TUINT8:
+ a = i386.AMOVB
+
+ case gc.OAS<<16 | gc.TINT16,
+ gc.OAS<<16 | gc.TUINT16:
+ a = i386.AMOVW
+
+ case gc.OAS<<16 | gc.TINT32,
+ gc.OAS<<16 | gc.TUINT32,
+ gc.OAS<<16 | gc.TPTR32:
+ a = i386.AMOVL
+
+ case gc.OAS<<16 | gc.TFLOAT32:
+ a = i386.AMOVSS
+
+ case gc.OAS<<16 | gc.TFLOAT64:
+ a = i386.AMOVSD
+
+ case gc.OADD<<16 | gc.TINT8,
+ gc.OADD<<16 | gc.TUINT8:
+ a = i386.AADDB
+
+ case gc.OADD<<16 | gc.TINT16,
+ gc.OADD<<16 | gc.TUINT16:
+ a = i386.AADDW
+
+ case gc.OADD<<16 | gc.TINT32,
+ gc.OADD<<16 | gc.TUINT32,
+ gc.OADD<<16 | gc.TPTR32:
+ a = i386.AADDL
+
+ case gc.OSUB<<16 | gc.TINT8,
+ gc.OSUB<<16 | gc.TUINT8:
+ a = i386.ASUBB
+
+ case gc.OSUB<<16 | gc.TINT16,
+ gc.OSUB<<16 | gc.TUINT16:
+ a = i386.ASUBW
+
+ case gc.OSUB<<16 | gc.TINT32,
+ gc.OSUB<<16 | gc.TUINT32,
+ gc.OSUB<<16 | gc.TPTR32:
+ a = i386.ASUBL
+
+ case gc.OINC<<16 | gc.TINT8,
+ gc.OINC<<16 | gc.TUINT8:
+ a = i386.AINCB
+
+ case gc.OINC<<16 | gc.TINT16,
+ gc.OINC<<16 | gc.TUINT16:
+ a = i386.AINCW
+
+ case gc.OINC<<16 | gc.TINT32,
+ gc.OINC<<16 | gc.TUINT32,
+ gc.OINC<<16 | gc.TPTR32:
+ a = i386.AINCL
+
+ case gc.ODEC<<16 | gc.TINT8,
+ gc.ODEC<<16 | gc.TUINT8:
+ a = i386.ADECB
+
+ case gc.ODEC<<16 | gc.TINT16,
+ gc.ODEC<<16 | gc.TUINT16:
+ a = i386.ADECW
+
+ case gc.ODEC<<16 | gc.TINT32,
+ gc.ODEC<<16 | gc.TUINT32,
+ gc.ODEC<<16 | gc.TPTR32:
+ a = i386.ADECL
+
+ case gc.OCOM<<16 | gc.TINT8,
+ gc.OCOM<<16 | gc.TUINT8:
+ a = i386.ANOTB
+
+ case gc.OCOM<<16 | gc.TINT16,
+ gc.OCOM<<16 | gc.TUINT16:
+ a = i386.ANOTW
+
+ case gc.OCOM<<16 | gc.TINT32,
+ gc.OCOM<<16 | gc.TUINT32,
+ gc.OCOM<<16 | gc.TPTR32:
+ a = i386.ANOTL
+
+ case gc.OMINUS<<16 | gc.TINT8,
+ gc.OMINUS<<16 | gc.TUINT8:
+ a = i386.ANEGB
+
+ case gc.OMINUS<<16 | gc.TINT16,
+ gc.OMINUS<<16 | gc.TUINT16:
+ a = i386.ANEGW
+
+ case gc.OMINUS<<16 | gc.TINT32,
+ gc.OMINUS<<16 | gc.TUINT32,
+ gc.OMINUS<<16 | gc.TPTR32:
+ a = i386.ANEGL
+
+ case gc.OAND<<16 | gc.TINT8,
+ gc.OAND<<16 | gc.TUINT8:
+ a = i386.AANDB
+
+ case gc.OAND<<16 | gc.TINT16,
+ gc.OAND<<16 | gc.TUINT16:
+ a = i386.AANDW
+
+ case gc.OAND<<16 | gc.TINT32,
+ gc.OAND<<16 | gc.TUINT32,
+ gc.OAND<<16 | gc.TPTR32:
+ a = i386.AANDL
+
+ case gc.OOR<<16 | gc.TINT8,
+ gc.OOR<<16 | gc.TUINT8:
+ a = i386.AORB
+
+ case gc.OOR<<16 | gc.TINT16,
+ gc.OOR<<16 | gc.TUINT16:
+ a = i386.AORW
+
+ case gc.OOR<<16 | gc.TINT32,
+ gc.OOR<<16 | gc.TUINT32,
+ gc.OOR<<16 | gc.TPTR32:
+ a = i386.AORL
+
+ case gc.OXOR<<16 | gc.TINT8,
+ gc.OXOR<<16 | gc.TUINT8:
+ a = i386.AXORB
+
+ case gc.OXOR<<16 | gc.TINT16,
+ gc.OXOR<<16 | gc.TUINT16:
+ a = i386.AXORW
+
+ case gc.OXOR<<16 | gc.TINT32,
+ gc.OXOR<<16 | gc.TUINT32,
+ gc.OXOR<<16 | gc.TPTR32:
+ a = i386.AXORL
+
+ case gc.OLROT<<16 | gc.TINT8,
+ gc.OLROT<<16 | gc.TUINT8:
+ a = i386.AROLB
+
+ case gc.OLROT<<16 | gc.TINT16,
+ gc.OLROT<<16 | gc.TUINT16:
+ a = i386.AROLW
+
+ case gc.OLROT<<16 | gc.TINT32,
+ gc.OLROT<<16 | gc.TUINT32,
+ gc.OLROT<<16 | gc.TPTR32:
+ a = i386.AROLL
+
+ case gc.OLSH<<16 | gc.TINT8,
+ gc.OLSH<<16 | gc.TUINT8:
+ a = i386.ASHLB
+
+ case gc.OLSH<<16 | gc.TINT16,
+ gc.OLSH<<16 | gc.TUINT16:
+ a = i386.ASHLW
+
+ case gc.OLSH<<16 | gc.TINT32,
+ gc.OLSH<<16 | gc.TUINT32,
+ gc.OLSH<<16 | gc.TPTR32:
+ a = i386.ASHLL
+
+ case gc.ORSH<<16 | gc.TUINT8:
+ a = i386.ASHRB
+
+ case gc.ORSH<<16 | gc.TUINT16:
+ a = i386.ASHRW
+
+ case gc.ORSH<<16 | gc.TUINT32,
+ gc.ORSH<<16 | gc.TPTR32:
+ a = i386.ASHRL
+
+ case gc.ORSH<<16 | gc.TINT8:
+ a = i386.ASARB
+
+ case gc.ORSH<<16 | gc.TINT16:
+ a = i386.ASARW
+
+ case gc.ORSH<<16 | gc.TINT32:
+ a = i386.ASARL
+
+ case gc.OHMUL<<16 | gc.TINT8,
+ gc.OMUL<<16 | gc.TINT8,
+ gc.OMUL<<16 | gc.TUINT8:
+ a = i386.AIMULB
+
+ case gc.OHMUL<<16 | gc.TINT16,
+ gc.OMUL<<16 | gc.TINT16,
+ gc.OMUL<<16 | gc.TUINT16:
+ a = i386.AIMULW
+
+ case gc.OHMUL<<16 | gc.TINT32,
+ gc.OMUL<<16 | gc.TINT32,
+ gc.OMUL<<16 | gc.TUINT32,
+ gc.OMUL<<16 | gc.TPTR32:
+ a = i386.AIMULL
+
+ case gc.OHMUL<<16 | gc.TUINT8:
+ a = i386.AMULB
+
+ case gc.OHMUL<<16 | gc.TUINT16:
+ a = i386.AMULW
+
+ case gc.OHMUL<<16 | gc.TUINT32,
+ gc.OHMUL<<16 | gc.TPTR32:
+ a = i386.AMULL
+
+ case gc.ODIV<<16 | gc.TINT8,
+ gc.OMOD<<16 | gc.TINT8:
+ a = i386.AIDIVB
+
+ case gc.ODIV<<16 | gc.TUINT8,
+ gc.OMOD<<16 | gc.TUINT8:
+ a = i386.ADIVB
+
+ case gc.ODIV<<16 | gc.TINT16,
+ gc.OMOD<<16 | gc.TINT16:
+ a = i386.AIDIVW
+
+ case gc.ODIV<<16 | gc.TUINT16,
+ gc.OMOD<<16 | gc.TUINT16:
+ a = i386.ADIVW
+
+ case gc.ODIV<<16 | gc.TINT32,
+ gc.OMOD<<16 | gc.TINT32:
+ a = i386.AIDIVL
+
+ case gc.ODIV<<16 | gc.TUINT32,
+ gc.ODIV<<16 | gc.TPTR32,
+ gc.OMOD<<16 | gc.TUINT32,
+ gc.OMOD<<16 | gc.TPTR32:
+ a = i386.ADIVL
+
+ case gc.OEXTEND<<16 | gc.TINT16:
+ a = i386.ACWD
+
+ case gc.OEXTEND<<16 | gc.TINT32:
+ a = i386.ACDQ
+ }
+
+ return a
+}
+
+func foptoas(op int, t *gc.Type, flg int) int {
+ var et int
+ var a int
+
+ a = obj.AXXX
+ et = int(gc.Simtype[t.Etype])
+
+ if gc.Use_sse != 0 {
+ goto sse
+ }
+
+ // If we need Fpop, it means we're working on
+ // two different floating-point registers, not memory.
+ // In that case the instruction has only a float64 form.
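+ // For example, foptoas(gc.OADD, a float64 type, Fpop) selects
+ // i386.AFADDDP, the popping form.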
+ if flg&Fpop != 0 {
+ et = gc.TFLOAT64
+ }
+
+ // clear Frev if unneeded
+ switch op {
+ case gc.OADD,
+ gc.OMUL:
+ flg &^= Frev
+ }
+
+ switch uint32(op)<<16 | (uint32(et)<<8 | uint32(flg)) {
+ case gc.OADD<<16 | (gc.TFLOAT32<<8 | 0):
+ return i386.AFADDF
+
+ case gc.OADD<<16 | (gc.TFLOAT64<<8 | 0):
+ return i386.AFADDD
+
+ case gc.OADD<<16 | (gc.TFLOAT64<<8 | Fpop):
+ return i386.AFADDDP
+
+ case gc.OSUB<<16 | (gc.TFLOAT32<<8 | 0):
+ return i386.AFSUBF
+
+ case gc.OSUB<<16 | (gc.TFLOAT32<<8 | Frev):
+ return i386.AFSUBRF
+
+ case gc.OSUB<<16 | (gc.TFLOAT64<<8 | 0):
+ return i386.AFSUBD
+
+ case gc.OSUB<<16 | (gc.TFLOAT64<<8 | Frev):
+ return i386.AFSUBRD
+
+ case gc.OSUB<<16 | (gc.TFLOAT64<<8 | Fpop):
+ return i386.AFSUBDP
+
+ case gc.OSUB<<16 | (gc.TFLOAT64<<8 | (Fpop | Frev)):
+ return i386.AFSUBRDP
+
+ case gc.OMUL<<16 | (gc.TFLOAT32<<8 | 0):
+ return i386.AFMULF
+
+ case gc.OMUL<<16 | (gc.TFLOAT64<<8 | 0):
+ return i386.AFMULD
+
+ case gc.OMUL<<16 | (gc.TFLOAT64<<8 | Fpop):
+ return i386.AFMULDP
+
+ case gc.ODIV<<16 | (gc.TFLOAT32<<8 | 0):
+ return i386.AFDIVF
+
+ case gc.ODIV<<16 | (gc.TFLOAT32<<8 | Frev):
+ return i386.AFDIVRF
+
+ case gc.ODIV<<16 | (gc.TFLOAT64<<8 | 0):
+ return i386.AFDIVD
+
+ case gc.ODIV<<16 | (gc.TFLOAT64<<8 | Frev):
+ return i386.AFDIVRD
+
+ case gc.ODIV<<16 | (gc.TFLOAT64<<8 | Fpop):
+ return i386.AFDIVDP
+
+ case gc.ODIV<<16 | (gc.TFLOAT64<<8 | (Fpop | Frev)):
+ return i386.AFDIVRDP
+
+ case gc.OCMP<<16 | (gc.TFLOAT32<<8 | 0):
+ return i386.AFCOMF
+
+ case gc.OCMP<<16 | (gc.TFLOAT32<<8 | Fpop):
+ return i386.AFCOMFP
+
+ case gc.OCMP<<16 | (gc.TFLOAT64<<8 | 0):
+ return i386.AFCOMD
+
+ case gc.OCMP<<16 | (gc.TFLOAT64<<8 | Fpop):
+ return i386.AFCOMDP
+
+ case gc.OCMP<<16 | (gc.TFLOAT64<<8 | Fpop2):
+ return i386.AFCOMDPP
+
+ case gc.OMINUS<<16 | (gc.TFLOAT32<<8 | 0):
+ return i386.AFCHS
+
+ case gc.OMINUS<<16 | (gc.TFLOAT64<<8 | 0):
+ return i386.AFCHS
+ }
+
+ gc.Fatal("foptoas %v %v %#x", gc.Oconv(int(op), 0), gc.Tconv(t, 0), flg)
+ return 0
+
+sse:
+ switch uint32(op)<<16 | uint32(et) {
+ default:
+ gc.Fatal("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+ case gc.OCMP<<16 | gc.TFLOAT32:
+ a = i386.AUCOMISS
+
+ case gc.OCMP<<16 | gc.TFLOAT64:
+ a = i386.AUCOMISD
+
+ case gc.OAS<<16 | gc.TFLOAT32:
+ a = i386.AMOVSS
+
+ case gc.OAS<<16 | gc.TFLOAT64:
+ a = i386.AMOVSD
+
+ case gc.OADD<<16 | gc.TFLOAT32:
+ a = i386.AADDSS
+
+ case gc.OADD<<16 | gc.TFLOAT64:
+ a = i386.AADDSD
+
+ case gc.OSUB<<16 | gc.TFLOAT32:
+ a = i386.ASUBSS
+
+ case gc.OSUB<<16 | gc.TFLOAT64:
+ a = i386.ASUBSD
+
+ case gc.OMUL<<16 | gc.TFLOAT32:
+ a = i386.AMULSS
+
+ case gc.OMUL<<16 | gc.TFLOAT64:
+ a = i386.AMULSD
+
+ case gc.ODIV<<16 | gc.TFLOAT32:
+ a = i386.ADIVSS
+
+ case gc.ODIV<<16 | gc.TFLOAT64:
+ a = i386.ADIVSD
+ }
+
+ return a
+}
+
+var resvd = []int{
+ // REG_DI, // for movstring
+ // REG_SI, // for movstring
+
+ i386.REG_AX, // for divide
+ i386.REG_CX, // for shift
+ i386.REG_DX, // for divide
+ i386.REG_SP, // for stack
+
+ i386.REG_BL, // because REG_BX can be allocated
+ i386.REG_BH,
+}
+
+func ginit() {
+ var i int
+
+ for i = 0; i < len(reg); i++ {
+ reg[i] = 1
+ }
+ for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ reg[i] = 0
+ }
+ for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ reg[i] = 0
+ }
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]]++
+ }
+}
+
+var regpc [i386.MAXREG]uint32
+
+func gclean() {
+ var i int
+
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]]--
+ }
+
+ for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ if reg[i] != 0 {
+ gc.Yyerror("reg %v left allocated at %x", gc.Ctxt.Rconv(i), regpc[i])
+ }
+ }
+ for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ if reg[i] != 0 {
+ gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
+ }
+ }
+}
+
+func anyregalloc() int {
+ var i int
+ var j int
+
+ for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ if reg[i] == 0 {
+ goto ok
+ }
+ for j = 0; j < len(resvd); j++ {
+ if resvd[j] == i {
+ goto ok
+ }
+ }
+ return 1
+ ok:
+ }
+
+ for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ if reg[i] != 0 {
+ return 1
+ }
+ }
+ return 0
+}
+
+/*
+ * allocate register of type t, leave in n.
+ * if o != N, o is desired fixed register.
+ * caller must regfree(n).
+ */
+func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
+ var i int
+ var et int
+
+ if t == nil {
+ gc.Fatal("regalloc: t nil")
+ }
+ et = int(gc.Simtype[t.Etype])
+
+ switch et {
+ case gc.TINT64,
+ gc.TUINT64:
+ gc.Fatal("regalloc64")
+ fallthrough
+
+ case gc.TINT8,
+ gc.TUINT8,
+ gc.TINT16,
+ gc.TUINT16,
+ gc.TINT32,
+ gc.TUINT32,
+ gc.TPTR32,
+ gc.TPTR64,
+ gc.TBOOL:
+ if o != nil && o.Op == gc.OREGISTER {
+ i = int(o.Val.U.Reg)
+ if i >= i386.REG_AX && i <= i386.REG_DI {
+ goto out
+ }
+ }
+
+ for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ if reg[i] == 0 {
+ goto out
+ }
+ }
+
+ fmt.Printf("registers allocated at\n")
+ for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ fmt.Printf("\t%v\t%#x\n", gc.Ctxt.Rconv(i), regpc[i])
+ }
+ gc.Fatal("out of fixed registers")
+ goto err
+
+ case gc.TFLOAT32,
+ gc.TFLOAT64:
+ if !(gc.Use_sse != 0) {
+ i = i386.REG_F0
+ goto out
+ }
+
+ if o != nil && o.Op == gc.OREGISTER {
+ i = int(o.Val.U.Reg)
+ if i >= i386.REG_X0 && i <= i386.REG_X7 {
+ goto out
+ }
+ }
+
+ for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ if reg[i] == 0 {
+ goto out
+ }
+ }
+ fmt.Printf("registers allocated at\n")
+ for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ fmt.Printf("\t%v\t%#x\n", gc.Ctxt.Rconv(i), regpc[i])
+ }
+ gc.Fatal("out of floating registers")
+ }
+
+ gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0))
+
+err:
+ gc.Nodreg(n, t, 0)
+ return
+
+out:
+ if i == i386.REG_SP {
+ fmt.Printf("alloc SP\n")
+ }
+ if reg[i] == 0 {
+ regpc[i] = uint32(obj.Getcallerpc(&n))
+ if i == i386.REG_AX || i == i386.REG_CX || i == i386.REG_DX || i == i386.REG_SP {
+ gc.Dump("regalloc-o", o)
+ gc.Fatal("regalloc %v", gc.Ctxt.Rconv(i))
+ }
+ }
+
+ reg[i]++
+ gc.Nodreg(n, t, i)
+}
+
+func regfree(n *gc.Node) {
+ var i int
+
+ if n.Op == gc.ONAME {
+ return
+ }
+ if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
+ gc.Fatal("regfree: not a register")
+ }
+ i = int(n.Val.U.Reg)
+ if i == i386.REG_SP {
+ return
+ }
+ if i < 0 || i >= len(reg) {
+ gc.Fatal("regfree: reg out of range")
+ }
+ if reg[i] <= 0 {
+ gc.Fatal("regfree: reg not allocated")
+ }
+ reg[i]--
+ if reg[i] == 0 && (i == i386.REG_AX || i == i386.REG_CX || i == i386.REG_DX || i == i386.REG_SP) {
+ gc.Fatal("regfree %v", gc.Ctxt.Rconv(i))
+ }
+}
+
+/*
+ * generate
+ * as $c, reg
+ */
+func gconreg(as int, c int64, reg int) {
+ var n1 gc.Node
+ var n2 gc.Node
+
+ gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+ gc.Nodreg(&n2, gc.Types[gc.TINT64], reg)
+ gins(as, &n1, &n2)
+}
+
+/*
+ * swap node contents
+ */
+func nswap(a *gc.Node, b *gc.Node) {
+ var t gc.Node
+
+ t = *a
+ *a = *b
+ *b = t
+}
+
+/*
+ * return constant i node.
+ * overwritten by next call, but useful in calls to gins.
+ */
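+// Because the node is shared, at most one ncon result should be live
+// in a single generated instruction.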
+
+var ncon_n gc.Node
+
+func ncon(i uint32) *gc.Node {
+ if ncon_n.Type == nil {
+ gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
+ }
+ gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i))
+ return &ncon_n
+}
+
+var sclean [10]gc.Node
+
+var nsclean int
+
+/*
+ * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
+ */
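+// For example, a TUINT64 temporary at offset 8 splits into lo at offset
+// 8 and hi at offset 12: x86 is little-endian, so the low 32 bits sit
+// at the lower address.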
+func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
+ var n1 gc.Node
+ var i int64
+
+ if !(gc.Is64(n.Type) != 0) {
+ gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
+ }
+
+ if nsclean >= len(sclean) {
+ gc.Fatal("split64 clean")
+ }
+ sclean[nsclean].Op = gc.OEMPTY
+ nsclean++
+ switch n.Op {
+ default:
+ switch n.Op {
+ default:
+ if !(dotaddable(n, &n1) != 0) {
+ igen(n, &n1, nil)
+ sclean[nsclean-1] = n1
+ }
+
+ n = &n1
+
+ case gc.ONAME:
+ if n.Class == gc.PPARAMREF {
+ cgen(n.Heapaddr, &n1)
+ sclean[nsclean-1] = n1
+ n = &n1
+ }
+
+ // nothing
+ case gc.OINDREG:
+ break
+ }
+
+ *lo = *n
+ *hi = *n
+ lo.Type = gc.Types[gc.TUINT32]
+ if n.Type.Etype == gc.TINT64 {
+ hi.Type = gc.Types[gc.TINT32]
+ } else {
+ hi.Type = gc.Types[gc.TUINT32]
+ }
+ hi.Xoffset += 4
+
+ case gc.OLITERAL:
+ gc.Convconst(&n1, n.Type, &n.Val)
+ i = gc.Mpgetfix(n1.Val.U.Xval)
+ gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
+ i >>= 32
+ if n.Type.Etype == gc.TINT64 {
+ gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
+ } else {
+ gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
+ }
+ }
+}
+
+func splitclean() {
+ if nsclean <= 0 {
+ gc.Fatal("splitclean")
+ }
+ nsclean--
+ if sclean[nsclean].Op != gc.OEMPTY {
+ regfree(&sclean[nsclean])
+ }
+}
+
+/*
+ * set up nodes representing fp constants
+ */
+var zerof gc.Node
+
+var two64f gc.Node
+
+var two63f gc.Node
+
+var bignodes_did int
+
+func bignodes() {
+ if bignodes_did != 0 {
+ return
+ }
+ bignodes_did = 1
+
+ two64f = *ncon(0)
+ two64f.Type = gc.Types[gc.TFLOAT64]
+ two64f.Val.Ctype = gc.CTFLT
+ two64f.Val.U.Fval = new(gc.Mpflt)
+ gc.Mpmovecflt(two64f.Val.U.Fval, 18446744073709551616.)
+
+ two63f = two64f
+ two63f.Val.U.Fval = new(gc.Mpflt)
+ gc.Mpmovecflt(two63f.Val.U.Fval, 9223372036854775808.)
+
+ zerof = two64f
+ zerof.Val.U.Fval = new(gc.Mpflt)
+ gc.Mpmovecflt(zerof.Val.U.Fval, 0)
+}
+
+func memname(n *gc.Node, t *gc.Type) {
+ gc.Tempname(n, t)
+ n.Sym = gc.Lookup("." + n.Sym.Name[1:]) // keep optimizer from registerizing
+ n.Orig.Sym = n.Sym
+}
+
+func gmove(f *gc.Node, t *gc.Node) {
+ var a int
+ var ft int
+ var tt int
+ var cvt *gc.Type
+ var r1 gc.Node
+ var r2 gc.Node
+ var flo gc.Node
+ var fhi gc.Node
+ var tlo gc.Node
+ var thi gc.Node
+ var con gc.Node
+
+ if gc.Debug['M'] != 0 {
+ fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
+ }
+
+ ft = gc.Simsimtype(f.Type)
+ tt = gc.Simsimtype(t.Type)
+ cvt = t.Type
+
+ if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
+ gc.Complexmove(f, t)
+ return
+ }
+
+ if gc.Isfloat[ft] != 0 || gc.Isfloat[tt] != 0 {
+ floatmove(f, t)
+ return
+ }
+
+ // cannot have two integer memory operands;
+ // except 64-bit, which always copies via registers anyway.
+ if gc.Isint[ft] != 0 && gc.Isint[tt] != 0 && !(gc.Is64(f.Type) != 0) && !(gc.Is64(t.Type) != 0) && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ goto hard
+ }
+
+ // convert constant to desired type
+ if f.Op == gc.OLITERAL {
+ gc.Convconst(&con, t.Type, &f.Val)
+ f = &con
+ ft = gc.Simsimtype(con.Type)
+ }
+
+ // value -> value copy, only one memory operand.
+ // figure out the instruction to use.
+ // break out of switch for one-instruction gins.
+ // goto rdst for "destination must be register".
+ // goto hard for "convert to cvt type first".
+ // otherwise handle and return.
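+ // For example, TINT8 -> TINT32 sign-extends with AMOVBLSX via rdst,
+ // while TINT8 -> TINT64 converts through an int32 intermediate via hard.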
+
+ switch uint32(ft)<<16 | uint32(tt) {
+ default:
+ goto fatal
+
+ /*
+ * integer copy and truncate
+ */
+ case gc.TINT8<<16 | gc.TINT8, // same size
+ gc.TINT8<<16 | gc.TUINT8,
+ gc.TUINT8<<16 | gc.TINT8,
+ gc.TUINT8<<16 | gc.TUINT8:
+ a = i386.AMOVB
+
+ case gc.TINT16<<16 | gc.TINT8, // truncate
+ gc.TUINT16<<16 | gc.TINT8,
+ gc.TINT32<<16 | gc.TINT8,
+ gc.TUINT32<<16 | gc.TINT8,
+ gc.TINT16<<16 | gc.TUINT8,
+ gc.TUINT16<<16 | gc.TUINT8,
+ gc.TINT32<<16 | gc.TUINT8,
+ gc.TUINT32<<16 | gc.TUINT8:
+ a = i386.AMOVB
+
+ goto rsrc
+
+ case gc.TINT64<<16 | gc.TINT8, // truncate low word
+ gc.TUINT64<<16 | gc.TINT8,
+ gc.TINT64<<16 | gc.TUINT8,
+ gc.TUINT64<<16 | gc.TUINT8:
+ split64(f, &flo, &fhi)
+
+ gc.Nodreg(&r1, t.Type, i386.REG_AX)
+ gmove(&flo, &r1)
+ gins(i386.AMOVB, &r1, t)
+ splitclean()
+ return
+
+ case gc.TINT16<<16 | gc.TINT16, // same size
+ gc.TINT16<<16 | gc.TUINT16,
+ gc.TUINT16<<16 | gc.TINT16,
+ gc.TUINT16<<16 | gc.TUINT16:
+ a = i386.AMOVW
+
+ case gc.TINT32<<16 | gc.TINT16, // truncate
+ gc.TUINT32<<16 | gc.TINT16,
+ gc.TINT32<<16 | gc.TUINT16,
+ gc.TUINT32<<16 | gc.TUINT16:
+ a = i386.AMOVW
+
+ goto rsrc
+
+ case gc.TINT64<<16 | gc.TINT16, // truncate low word
+ gc.TUINT64<<16 | gc.TINT16,
+ gc.TINT64<<16 | gc.TUINT16,
+ gc.TUINT64<<16 | gc.TUINT16:
+ split64(f, &flo, &fhi)
+
+ gc.Nodreg(&r1, t.Type, i386.REG_AX)
+ gmove(&flo, &r1)
+ gins(i386.AMOVW, &r1, t)
+ splitclean()
+ return
+
+ case gc.TINT32<<16 | gc.TINT32, // same size
+ gc.TINT32<<16 | gc.TUINT32,
+ gc.TUINT32<<16 | gc.TINT32,
+ gc.TUINT32<<16 | gc.TUINT32:
+ a = i386.AMOVL
+
+ case gc.TINT64<<16 | gc.TINT32, // truncate
+ gc.TUINT64<<16 | gc.TINT32,
+ gc.TINT64<<16 | gc.TUINT32,
+ gc.TUINT64<<16 | gc.TUINT32:
+ split64(f, &flo, &fhi)
+
+ gc.Nodreg(&r1, t.Type, i386.REG_AX)
+ gmove(&flo, &r1)
+ gins(i386.AMOVL, &r1, t)
+ splitclean()
+ return
+
+ case gc.TINT64<<16 | gc.TINT64, // same size
+ gc.TINT64<<16 | gc.TUINT64,
+ gc.TUINT64<<16 | gc.TINT64,
+ gc.TUINT64<<16 | gc.TUINT64:
+ split64(f, &flo, &fhi)
+
+ split64(t, &tlo, &thi)
+ if f.Op == gc.OLITERAL {
+ gins(i386.AMOVL, &flo, &tlo)
+ gins(i386.AMOVL, &fhi, &thi)
+ } else {
+ gc.Nodreg(&r1, gc.Types[gc.TUINT32], i386.REG_AX)
+ gc.Nodreg(&r2, gc.Types[gc.TUINT32], i386.REG_DX)
+ gins(i386.AMOVL, &flo, &r1)
+ gins(i386.AMOVL, &fhi, &r2)
+ gins(i386.AMOVL, &r1, &tlo)
+ gins(i386.AMOVL, &r2, &thi)
+ }
+
+ splitclean()
+ splitclean()
+ return
+
+ /*
+ * integer up-conversions
+ */
+ case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+ gc.TINT8<<16 | gc.TUINT16:
+ a = i386.AMOVBWSX
+
+ goto rdst
+
+ case gc.TINT8<<16 | gc.TINT32,
+ gc.TINT8<<16 | gc.TUINT32:
+ a = i386.AMOVBLSX
+ goto rdst
+
+ case gc.TINT8<<16 | gc.TINT64, // convert via int32
+ gc.TINT8<<16 | gc.TUINT64:
+ cvt = gc.Types[gc.TINT32]
+
+ goto hard
+
+ case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+ gc.TUINT8<<16 | gc.TUINT16:
+ a = i386.AMOVBWZX
+
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TINT32,
+ gc.TUINT8<<16 | gc.TUINT32:
+ a = i386.AMOVBLZX
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
+ gc.TUINT8<<16 | gc.TUINT64:
+ cvt = gc.Types[gc.TUINT32]
+
+ goto hard
+
+ case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+ gc.TINT16<<16 | gc.TUINT32:
+ a = i386.AMOVWLSX
+
+ goto rdst
+
+ case gc.TINT16<<16 | gc.TINT64, // convert via int32
+ gc.TINT16<<16 | gc.TUINT64:
+ cvt = gc.Types[gc.TINT32]
+
+ goto hard
+
+ case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+ gc.TUINT16<<16 | gc.TUINT32:
+ a = i386.AMOVWLZX
+
+ goto rdst
+
+ case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
+ gc.TUINT16<<16 | gc.TUINT64:
+ cvt = gc.Types[gc.TUINT32]
+
+ goto hard
+
+ case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+ gc.TINT32<<16 | gc.TUINT64:
+ split64(t, &tlo, &thi)
+
+ gc.Nodreg(&flo, tlo.Type, i386.REG_AX)
+ gc.Nodreg(&fhi, thi.Type, i386.REG_DX)
+ gmove(f, &flo)
+ gins(i386.ACDQ, nil, nil)
+ gins(i386.AMOVL, &flo, &tlo)
+ gins(i386.AMOVL, &fhi, &thi)
+ splitclean()
+ return
+
+ case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+ gc.TUINT32<<16 | gc.TUINT64:
+ split64(t, &tlo, &thi)
+
+ gmove(f, &tlo)
+ gins(i386.AMOVL, ncon(0), &thi)
+ splitclean()
+ return
+ }
+
+ gins(a, f, t)
+ return
+
+ // requires register source
+rsrc:
+ regalloc(&r1, f.Type, t)
+
+ gmove(f, &r1)
+ gins(a, &r1, t)
+ regfree(&r1)
+ return
+
+ // requires register destination
+rdst:
+ regalloc(&r1, t.Type, t)
+
+ gins(a, f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ // requires register intermediate
+hard:
+ regalloc(&r1, cvt, t)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ // should not happen
+fatal:
+ gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
+}
+
+func floatmove(f *gc.Node, t *gc.Node) {
+ var r1 gc.Node
+ var r2 gc.Node
+ var t1 gc.Node
+ var t2 gc.Node
+ var tlo gc.Node
+ var thi gc.Node
+ var con gc.Node
+ var f0 gc.Node
+ var f1 gc.Node
+ var ax gc.Node
+ var dx gc.Node
+ var cx gc.Node
+ var cvt *gc.Type
+ var ft int
+ var tt int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+
+ ft = gc.Simsimtype(f.Type)
+ tt = gc.Simsimtype(t.Type)
+ cvt = t.Type
+
+ // cannot have two floating point memory operands.
+ if gc.Isfloat[ft] != 0 && gc.Isfloat[tt] != 0 && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ goto hard
+ }
+
+ // convert constant to desired type
+ if f.Op == gc.OLITERAL {
+ gc.Convconst(&con, t.Type, &f.Val)
+ f = &con
+ ft = gc.Simsimtype(con.Type)
+
+ // some constants can't move directly to memory.
+ if gc.Ismem(t) != 0 {
+ // float constants come from memory.
+ if gc.Isfloat[tt] != 0 {
+ goto hard
+ }
+ }
+ }
+
+ // value -> value copy, only one memory operand.
+ // figure out the instruction to use.
+ // break out of switch for one-instruction gins.
+ // goto rdst for "destination must be register".
+ // goto hard for "convert to cvt type first".
+ // otherwise handle and return.
+
+ switch uint32(ft)<<16 | uint32(tt) {
+ default:
+ if gc.Use_sse != 0 {
+ floatmove_sse(f, t)
+ } else {
+ floatmove_387(f, t)
+ }
+ return
+
+ // float to very long integer.
+ case gc.TFLOAT32<<16 | gc.TINT64,
+ gc.TFLOAT64<<16 | gc.TINT64:
+ if f.Op == gc.OREGISTER {
+ cvt = f.Type
+ goto hardmem
+ }
+
+ gc.Nodreg(&r1, gc.Types[ft], i386.REG_F0)
+ if ft == gc.TFLOAT32 {
+ gins(i386.AFMOVF, f, &r1)
+ } else {
+ gins(i386.AFMOVD, f, &r1)
+ }
+
+ // set round to zero mode during conversion
+ memname(&t1, gc.Types[gc.TUINT16])
+
+ memname(&t2, gc.Types[gc.TUINT16])
+ gins(i386.AFSTCW, nil, &t1)
+ gins(i386.AMOVW, ncon(0xf7f), &t2)
+ gins(i386.AFLDCW, &t2, nil)
+ if tt == gc.TINT16 {
+ gins(i386.AFMOVWP, &r1, t)
+ } else if tt == gc.TINT32 {
+ gins(i386.AFMOVLP, &r1, t)
+ } else {
+ gins(i386.AFMOVVP, &r1, t)
+ }
+ gins(i386.AFLDCW, &t1, nil)
+ return
+
+ case gc.TFLOAT32<<16 | gc.TUINT64,
+ gc.TFLOAT64<<16 | gc.TUINT64:
+ if !(gc.Ismem(f) != 0) {
+ cvt = f.Type
+ goto hardmem
+ }
+
+ bignodes()
+ gc.Nodreg(&f0, gc.Types[ft], i386.REG_F0)
+ gc.Nodreg(&f1, gc.Types[ft], i386.REG_F0+1)
+ gc.Nodreg(&ax, gc.Types[gc.TUINT16], i386.REG_AX)
+
+ if ft == gc.TFLOAT32 {
+ gins(i386.AFMOVF, f, &f0)
+ } else {
+ gins(i386.AFMOVD, f, &f0)
+ }
+
+ // if 0 > v { answer = 0 }
+ gins(i386.AFMOVD, &zerof, &f0)
+
+ gins(i386.AFUCOMIP, &f0, &f1)
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
+
+ // if 1<<64 <= v { answer = 0 too }
+ gins(i386.AFMOVD, &two64f, &f0)
+
+ gins(i386.AFUCOMIP, &f0, &f1)
+ p2 = gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
+ gc.Patch(p1, gc.Pc)
+ gins(i386.AFMOVVP, &f0, t) // don't care about t, but will pop the stack
+ split64(t, &tlo, &thi)
+ gins(i386.AMOVL, ncon(0), &tlo)
+ gins(i386.AMOVL, ncon(0), &thi)
+ splitclean()
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p2, gc.Pc)
+
+ // in range; algorithm is:
+ // if small enough, use native float64 -> int64 conversion.
+ // otherwise, subtract 2^63, convert, and add it back.
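+ // For example, for v = 2^63 + 1: subtracting 2^63 leaves 1, which
+ // converts natively; XORing the high word with 0x80000000 afterwards
+ // adds 2^63 back in.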
+
+ // set round to zero mode during conversion
+ memname(&t1, gc.Types[gc.TUINT16])
+
+ memname(&t2, gc.Types[gc.TUINT16])
+ gins(i386.AFSTCW, nil, &t1)
+ gins(i386.AMOVW, ncon(0xf7f), &t2)
+ gins(i386.AFLDCW, &t2, nil)
+
+ // actual work
+ gins(i386.AFMOVD, &two63f, &f0)
+
+ gins(i386.AFUCOMIP, &f0, &f1)
+ p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0)
+ gins(i386.AFMOVVP, &f0, t)
+ p3 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p2, gc.Pc)
+ gins(i386.AFMOVD, &two63f, &f0)
+ gins(i386.AFSUBDP, &f0, &f1)
+ gins(i386.AFMOVVP, &f0, t)
+ split64(t, &tlo, &thi)
+ gins(i386.AXORL, ncon(0x80000000), &thi) // + 2^63
+ gc.Patch(p3, gc.Pc)
+ splitclean()
+
+ // restore rounding mode
+ gins(i386.AFLDCW, &t1, nil)
+
+ gc.Patch(p1, gc.Pc)
+ return
+
+ /*
+ * integer to float
+ */
+ case gc.TINT64<<16 | gc.TFLOAT32,
+ gc.TINT64<<16 | gc.TFLOAT64:
+ if t.Op == gc.OREGISTER {
+ goto hardmem
+ }
+ gc.Nodreg(&f0, t.Type, i386.REG_F0)
+ gins(i386.AFMOVV, f, &f0)
+ if tt == gc.TFLOAT32 {
+ gins(i386.AFMOVFP, &f0, t)
+ } else {
+ gins(i386.AFMOVDP, &f0, t)
+ }
+ return
+
+ // algorithm is:
+ // if small enough, use native int64 -> float64 conversion.
+ // otherwise, halve (rounding to odd?), convert, and double.
+ case gc.TUINT64<<16 | gc.TFLOAT32,
+ gc.TUINT64<<16 | gc.TFLOAT64:
+ gc.Nodreg(&ax, gc.Types[gc.TUINT32], i386.REG_AX)
+
+ gc.Nodreg(&dx, gc.Types[gc.TUINT32], i386.REG_DX)
+ gc.Nodreg(&cx, gc.Types[gc.TUINT32], i386.REG_CX)
+ gc.Tempname(&t1, f.Type)
+ split64(&t1, &tlo, &thi)
+ gmove(f, &t1)
+ gins(i386.ACMPL, &thi, ncon(0))
+ p1 = gc.Gbranch(i386.AJLT, nil, 0)
+
+ // native
+ gc.Nodreg(&r1, gc.Types[tt], i386.REG_F0)
+
+ gins(i386.AFMOVV, &t1, &r1)
+ if tt == gc.TFLOAT32 {
+ gins(i386.AFMOVFP, &r1, t)
+ } else {
+ gins(i386.AFMOVDP, &r1, t)
+ }
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+
+ // simulated
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&tlo, &ax)
+ gmove(&thi, &dx)
+ p1 = gins(i386.ASHRL, ncon(1), &ax)
+ p1.From.Index = i386.REG_DX // double-width shift DX -> AX
+ p1.From.Scale = 0
+ gins(i386.AMOVL, ncon(0), &cx)
+ gins(i386.ASETCC, nil, &cx)
+ gins(i386.AORL, &cx, &ax)
+ gins(i386.ASHRL, ncon(1), &dx)
+ gmove(&dx, &thi)
+ gmove(&ax, &tlo)
+ gc.Nodreg(&r1, gc.Types[tt], i386.REG_F0)
+ gc.Nodreg(&r2, gc.Types[tt], i386.REG_F0+1)
+ gins(i386.AFMOVV, &t1, &r1)
+ gins(i386.AFMOVD, &r1, &r1)
+ gins(i386.AFADDDP, &r1, &r2)
+ if tt == gc.TFLOAT32 {
+ gins(i386.AFMOVFP, &r1, t)
+ } else {
+ gins(i386.AFMOVDP, &r1, t)
+ }
+ gc.Patch(p2, gc.Pc)
+ splitclean()
+ return
+ }
+
+ // requires register intermediate
+hard:
+ regalloc(&r1, cvt, t)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ // requires memory intermediate
+hardmem:
+ gc.Tempname(&r1, cvt)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ return
+}
+
+func floatmove_387(f *gc.Node, t *gc.Node) {
+ var r1 gc.Node
+ var t1 gc.Node
+ var t2 gc.Node
+ var cvt *gc.Type
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var a int
+ var ft int
+ var tt int
+
+ ft = gc.Simsimtype(f.Type)
+ tt = gc.Simsimtype(t.Type)
+ cvt = t.Type
+
+ switch uint32(ft)<<16 | uint32(tt) {
+ default:
+ goto fatal
+
+ /*
+ * float to integer
+ */
+ case gc.TFLOAT32<<16 | gc.TINT16,
+ gc.TFLOAT32<<16 | gc.TINT32,
+ gc.TFLOAT32<<16 | gc.TINT64,
+ gc.TFLOAT64<<16 | gc.TINT16,
+ gc.TFLOAT64<<16 | gc.TINT32,
+ gc.TFLOAT64<<16 | gc.TINT64:
+ if t.Op == gc.OREGISTER {
+ goto hardmem
+ }
+ gc.Nodreg(&r1, gc.Types[ft], i386.REG_F0)
+ if f.Op != gc.OREGISTER {
+ if ft == gc.TFLOAT32 {
+ gins(i386.AFMOVF, f, &r1)
+ } else {
+ gins(i386.AFMOVD, f, &r1)
+ }
+ }
+
+ // set round to zero mode during conversion
+ memname(&t1, gc.Types[gc.TUINT16])
+
+ memname(&t2, gc.Types[gc.TUINT16])
+ gins(i386.AFSTCW, nil, &t1)
+ gins(i386.AMOVW, ncon(0xf7f), &t2)
+ gins(i386.AFLDCW, &t2, nil)
+ if tt == gc.TINT16 {
+ gins(i386.AFMOVWP, &r1, t)
+ } else if tt == gc.TINT32 {
+ gins(i386.AFMOVLP, &r1, t)
+ } else {
+ gins(i386.AFMOVVP, &r1, t)
+ }
+ gins(i386.AFLDCW, &t1, nil)
+ return
+
+ // convert via int32.
+ case gc.TFLOAT32<<16 | gc.TINT8,
+ gc.TFLOAT32<<16 | gc.TUINT16,
+ gc.TFLOAT32<<16 | gc.TUINT8,
+ gc.TFLOAT64<<16 | gc.TINT8,
+ gc.TFLOAT64<<16 | gc.TUINT16,
+ gc.TFLOAT64<<16 | gc.TUINT8:
+ gc.Tempname(&t1, gc.Types[gc.TINT32])
+
+ gmove(f, &t1)
+ switch tt {
+ default:
+ gc.Fatal("gmove %v", gc.Nconv(t, 0))
+ fallthrough
+
+ case gc.TINT8:
+ gins(i386.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
+ gins(i386.ACMPL, &t1, ncon(0x7f))
+ p2 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
+ p3 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ gc.Patch(p2, gc.Pc)
+ gmove(ncon(-0x80&(1<<32-1)), &t1)
+ gc.Patch(p3, gc.Pc)
+ gmove(&t1, t)
+
+ case gc.TUINT8:
+ gins(i386.ATESTL, ncon(0xffffff00), &t1)
+ p1 = gc.Gbranch(i386.AJEQ, nil, +1)
+ gins(i386.AMOVL, ncon(0), &t1)
+ gc.Patch(p1, gc.Pc)
+ gmove(&t1, t)
+
+ case gc.TUINT16:
+ gins(i386.ATESTL, ncon(0xffff0000), &t1)
+ p1 = gc.Gbranch(i386.AJEQ, nil, +1)
+ gins(i386.AMOVL, ncon(0), &t1)
+ gc.Patch(p1, gc.Pc)
+ gmove(&t1, t)
+ }
+
+ return
+
+ // convert via int64.
+ case gc.TFLOAT32<<16 | gc.TUINT32,
+ gc.TFLOAT64<<16 | gc.TUINT32:
+ cvt = gc.Types[gc.TINT64]
+
+ goto hardmem
+
+ /*
+ * integer to float
+ */
+ case gc.TINT16<<16 | gc.TFLOAT32,
+ gc.TINT16<<16 | gc.TFLOAT64,
+ gc.TINT32<<16 | gc.TFLOAT32,
+ gc.TINT32<<16 | gc.TFLOAT64,
+ gc.TINT64<<16 | gc.TFLOAT32,
+ gc.TINT64<<16 | gc.TFLOAT64:
+ if t.Op != gc.OREGISTER {
+ goto hard
+ }
+ if f.Op == gc.OREGISTER {
+ cvt = f.Type
+ goto hardmem
+ }
+
+ switch ft {
+ case gc.TINT16:
+ a = i386.AFMOVW
+
+ case gc.TINT32:
+ a = i386.AFMOVL
+
+ default:
+ a = i386.AFMOVV
+ }
+
+ // convert via int32 memory
+ case gc.TINT8<<16 | gc.TFLOAT32,
+ gc.TINT8<<16 | gc.TFLOAT64,
+ gc.TUINT16<<16 | gc.TFLOAT32,
+ gc.TUINT16<<16 | gc.TFLOAT64,
+ gc.TUINT8<<16 | gc.TFLOAT32,
+ gc.TUINT8<<16 | gc.TFLOAT64:
+ cvt = gc.Types[gc.TINT32]
+
+ goto hardmem
+
+ // convert via int64 memory
+ case gc.TUINT32<<16 | gc.TFLOAT32,
+ gc.TUINT32<<16 | gc.TFLOAT64:
+ cvt = gc.Types[gc.TINT64]
+
+ goto hardmem
+
+ // The way the code generator uses floating-point
+ // registers, a move from F0 to F0 is intended as a no-op.
+ // On the x86, it's not: it pushes a second copy of F0
+ // on the floating point stack. So toss it away here.
+ // Also, F0 is the *only* register we ever evaluate
+ // into, so we should only see register/register as F0/F0.
+ /*
+ * float to float
+ */
+ case gc.TFLOAT32<<16 | gc.TFLOAT32,
+ gc.TFLOAT64<<16 | gc.TFLOAT64:
+ if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ goto hard
+ }
+ if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
+ if f.Val.U.Reg != i386.REG_F0 || t.Val.U.Reg != i386.REG_F0 {
+ goto fatal
+ }
+ return
+ }
+
+ a = i386.AFMOVF
+ if ft == gc.TFLOAT64 {
+ a = i386.AFMOVD
+ }
+ if gc.Ismem(t) != 0 {
+ if f.Op != gc.OREGISTER || f.Val.U.Reg != i386.REG_F0 {
+ gc.Fatal("gmove %v", gc.Nconv(f, 0))
+ }
+ a = i386.AFMOVFP
+ if ft == gc.TFLOAT64 {
+ a = i386.AFMOVDP
+ }
+ }
+
+ case gc.TFLOAT32<<16 | gc.TFLOAT64:
+ if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ goto hard
+ }
+ if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
+ if f.Val.U.Reg != i386.REG_F0 || t.Val.U.Reg != i386.REG_F0 {
+ goto fatal
+ }
+ return
+ }
+
+ if f.Op == gc.OREGISTER {
+ gins(i386.AFMOVDP, f, t)
+ } else {
+ gins(i386.AFMOVF, f, t)
+ }
+ return
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT32:
+ if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ goto hard
+ }
+ if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
+ gc.Tempname(&r1, gc.Types[gc.TFLOAT32])
+ gins(i386.AFMOVFP, f, &r1)
+ gins(i386.AFMOVF, &r1, t)
+ return
+ }
+
+ if f.Op == gc.OREGISTER {
+ gins(i386.AFMOVFP, f, t)
+ } else {
+ gins(i386.AFMOVD, f, t)
+ }
+ return
+ }
+
+ gins(a, f, t)
+ return
+
+ // requires register intermediate
+hard:
+ regalloc(&r1, cvt, t)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ // requires memory intermediate
+hardmem:
+ gc.Tempname(&r1, cvt)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ return
+
+ // should not happen
+fatal:
+ gc.Fatal("gmove %v -> %v", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+
+ return
+}
+
+func floatmove_sse(f *gc.Node, t *gc.Node) {
+ var r1 gc.Node
+ var cvt *gc.Type
+ var a int
+ var ft int
+ var tt int
+
+ ft = gc.Simsimtype(f.Type)
+ tt = gc.Simsimtype(t.Type)
+
+ switch uint32(ft)<<16 | uint32(tt) {
+ // should not happen
+ default:
+ gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
+
+ return
+
+ // convert via int32.
+ /*
+ * float to integer
+ */
+ case gc.TFLOAT32<<16 | gc.TINT16,
+ gc.TFLOAT32<<16 | gc.TINT8,
+ gc.TFLOAT32<<16 | gc.TUINT16,
+ gc.TFLOAT32<<16 | gc.TUINT8,
+ gc.TFLOAT64<<16 | gc.TINT16,
+ gc.TFLOAT64<<16 | gc.TINT8,
+ gc.TFLOAT64<<16 | gc.TUINT16,
+ gc.TFLOAT64<<16 | gc.TUINT8:
+ cvt = gc.Types[gc.TINT32]
+
+ goto hard
+
+ // convert via int64.
+ case gc.TFLOAT32<<16 | gc.TUINT32,
+ gc.TFLOAT64<<16 | gc.TUINT32:
+ cvt = gc.Types[gc.TINT64]
+
+ goto hardmem
+
+ case gc.TFLOAT32<<16 | gc.TINT32:
+ a = i386.ACVTTSS2SL
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TINT32:
+ a = i386.ACVTTSD2SL
+ goto rdst
+
+ // convert via int32 memory
+ /*
+ * integer to float
+ */
+ case gc.TINT8<<16 | gc.TFLOAT32,
+ gc.TINT8<<16 | gc.TFLOAT64,
+ gc.TINT16<<16 | gc.TFLOAT32,
+ gc.TINT16<<16 | gc.TFLOAT64,
+ gc.TUINT16<<16 | gc.TFLOAT32,
+ gc.TUINT16<<16 | gc.TFLOAT64,
+ gc.TUINT8<<16 | gc.TFLOAT32,
+ gc.TUINT8<<16 | gc.TFLOAT64:
+ cvt = gc.Types[gc.TINT32]
+
+ goto hard
+
+ // convert via int64 memory
+ case gc.TUINT32<<16 | gc.TFLOAT32,
+ gc.TUINT32<<16 | gc.TFLOAT64:
+ cvt = gc.Types[gc.TINT64]
+
+ goto hardmem
+
+ case gc.TINT32<<16 | gc.TFLOAT32:
+ a = i386.ACVTSL2SS
+ goto rdst
+
+ case gc.TINT32<<16 | gc.TFLOAT64:
+ a = i386.ACVTSL2SD
+ goto rdst
+
+ /*
+ * float to float
+ */
+ case gc.TFLOAT32<<16 | gc.TFLOAT32:
+ a = i386.AMOVSS
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT64:
+ a = i386.AMOVSD
+
+ case gc.TFLOAT32<<16 | gc.TFLOAT64:
+ a = i386.ACVTSS2SD
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT32:
+ a = i386.ACVTSD2SS
+ goto rdst
+ }
+
+ gins(a, f, t)
+ return
+
+ // requires register intermediate
+hard:
+ regalloc(&r1, cvt, t)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ // requires memory intermediate
+hardmem:
+ gc.Tempname(&r1, cvt)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ return
+
+ // requires register destination
+rdst:
+ regalloc(&r1, t.Type, t)
+
+ gins(a, f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+}
+
+func samaddr(f *gc.Node, t *gc.Node) int {
+ if f.Op != t.Op {
+ return 0
+ }
+
+ switch f.Op {
+ case gc.OREGISTER:
+ if f.Val.U.Reg != t.Val.U.Reg {
+ break
+ }
+ return 1
+ }
+
+ return 0
+}
+
+/*
+ * generate one instruction:
+ * as f, t
+ */
+func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+ var p *obj.Prog
+ var af obj.Addr
+ var at obj.Addr
+ var w int
+
+ if as == i386.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
+ gc.Fatal("gins MOVF reg, reg")
+ }
+ if as == i386.ACVTSD2SS && f != nil && f.Op == gc.OLITERAL {
+ gc.Fatal("gins CVTSD2SS const")
+ }
+ if as == i386.AMOVSD && t != nil && t.Op == gc.OREGISTER && t.Val.U.Reg == i386.REG_F0 {
+ gc.Fatal("gins MOVSD into F0")
+ }
+
+ switch as {
+ case i386.AMOVB,
+ i386.AMOVW,
+ i386.AMOVL:
+ if f != nil && t != nil && samaddr(f, t) != 0 {
+ return nil
+ }
+
+ case i386.ALEAL:
+ if f != nil && gc.Isconst(f, gc.CTNIL) != 0 {
+ gc.Fatal("gins LEAL nil %v", gc.Tconv(f.Type, 0))
+ }
+ }
+
+ af = obj.Addr{}
+ at = obj.Addr{}
+ if f != nil {
+ gc.Naddr(f, &af, 1)
+ }
+ if t != nil {
+ gc.Naddr(t, &at, 1)
+ }
+ p = gc.Prog(as)
+ if f != nil {
+ p.From = af
+ }
+ if t != nil {
+ p.To = at
+ }
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v\n", p)
+ }
+
+ w = 0
+ switch as {
+ case i386.AMOVB:
+ w = 1
+
+ case i386.AMOVW:
+ w = 2
+
+ case i386.AMOVL:
+ w = 4
+ }
+
+ if w != 0 && f != nil && (af.Width > int64(w) || at.Width > int64(w)) {
+ gc.Dump("bad width from:", f)
+ gc.Dump("bad width to:", t)
+ gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width)
+ }
+
+ if p.To.Type == obj.TYPE_ADDR && w > 0 {
+ gc.Fatal("bad use of addr: %v", p)
+ }
+
+ return p
+}
+
+func dotaddable(n *gc.Node, n1 *gc.Node) int {
+ var o int
+ var oary [10]int64
+ var nn *gc.Node
+
+ if n.Op != gc.ODOT {
+ return 0
+ }
+
+ o = gc.Dotoffset(n, oary[:], &nn)
+ if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+ *n1 = *nn
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ return 1
+ }
+
+ return 0
+}
+
+func sudoclean() {
+}
+
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
+ *a = obj.Addr{}
+ return 0
+}
diff --git a/src/cmd/new8g/peep.go b/src/cmd/new8g/peep.go
new file mode 100644
index 0000000000..523ce9efe2
--- /dev/null
+++ b/src/cmd/new8g/peep.go
@@ -0,0 +1,847 @@
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+const (
+ REGEXT = 0
+ exregoffset = i386.REG_DI
+)
+
+var gactive uint32
+
+// needc reports (as 0 or 1) whether some instruction at or after p
+// uses the carry bit before it is set or killed.
+func needc(p *obj.Prog) int {
+ var info gc.ProgInfo
+
+ for p != nil {
+ proginfo(&info, p)
+ if info.Flags&gc.UseCarry != 0 {
+ return 1
+ }
+ if info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
+ return 0
+ }
+ p = p.Link
+ }
+
+ return 0
+}
+
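+// rnops advances r past no-operand NOP instructions,
+// following unique successors, and returns the
+// resulting flow node.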
+func rnops(r *gc.Flow) *gc.Flow {
+ var p *obj.Prog
+ var r1 *gc.Flow
+
+ if r != nil {
+ for {
+ p = r.Prog
+ if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
+ break
+ }
+ r1 = gc.Uniqs(r)
+ if r1 == nil {
+ break
+ }
+ r = r1
+ }
+ }
+
+ return r
+}
+
+func peep(firstp *obj.Prog) {
+ var r *gc.Flow
+ var r1 *gc.Flow
+ var g *gc.Graph
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var t int
+
+ g = gc.Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+ gactive = 0
+
+ // byte, word arithmetic elimination.
+ elimshortmov(g)
+
+ // constant propagation
+ // find MOV $con,R followed by
+ // another MOV $con,R without
+ // setting R in the interim
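+ // For example, given
+ //	MOVL $10, AX
+ //	... (no write to AX)
+ //	MOVL $10, AX
+ // the second load is redundant and can be excised.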
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ case i386.ALEAL:
+ if regtyp(&p.To) != 0 {
+ if p.From.Sym != nil {
+ if p.From.Index == i386.REG_NONE {
+ conprop(r)
+ }
+ }
+ }
+
+ case i386.AMOVB,
+ i386.AMOVW,
+ i386.AMOVL,
+ i386.AMOVSS,
+ i386.AMOVSD:
+ if regtyp(&p.To) != 0 {
+ if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
+ conprop(r)
+ }
+ }
+ }
+ }
+
+loop1:
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("loop1", g.Start, 0)
+ }
+
+ t = 0
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ case i386.AMOVL,
+ i386.AMOVSS,
+ i386.AMOVSD:
+ if regtyp(&p.To) != 0 {
+ if regtyp(&p.From) != 0 {
+ if copyprop(g, r) != 0 {
+ excise(r)
+ t++
+ } else if subprop(r) != 0 && copyprop(g, r) != 0 {
+ excise(r)
+ t++
+ }
+ }
+ }
+
+ case i386.AMOVBLZX,
+ i386.AMOVWLZX,
+ i386.AMOVBLSX,
+ i386.AMOVWLSX:
+ if regtyp(&p.To) != 0 {
+ r1 = rnops(gc.Uniqs(r))
+ if r1 != nil {
+ p1 = r1.Prog
+ if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
+ p1.As = i386.AMOVL
+ t++
+ }
+ }
+ }
+
+ case i386.AADDL,
+ i386.AADDW:
+ if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+ break
+ }
+ if p.From.Offset == -1 {
+ if p.As == i386.AADDL {
+ p.As = i386.ADECL
+ } else {
+ p.As = i386.ADECW
+ }
+ p.From = obj.Zprog.From
+ break
+ }
+
+ if p.From.Offset == 1 {
+ if p.As == i386.AADDL {
+ p.As = i386.AINCL
+ } else {
+ p.As = i386.AINCW
+ }
+ p.From = obj.Zprog.From
+ break
+ }
+
+ case i386.ASUBL,
+ i386.ASUBW:
+ if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+ break
+ }
+ if p.From.Offset == -1 {
+ if p.As == i386.ASUBL {
+ p.As = i386.AINCL
+ } else {
+ p.As = i386.AINCW
+ }
+ p.From = obj.Zprog.From
+ break
+ }
+
+ if p.From.Offset == 1 {
+ if p.As == i386.ASUBL {
+ p.As = i386.ADECL
+ } else {
+ p.As = i386.ADECW
+ }
+ p.From = obj.Zprog.From
+ break
+ }
+ }
+ }
+
+ if t != 0 {
+ goto loop1
+ }
+
+ // MOVSD removal.
+ // We never use packed registers, so a MOVSD between registers
+ // can be replaced by MOVAPD, which moves the pair of float64s
+ // instead of just the lower one. We only use the lower one, but
+ // the processor can do better if we do moves using both.
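+ // That is, a register-to-register MOVSD such as
+ // MOVSD X1, X2 is rewritten as MOVAPD X1, X2.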
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ if p.As == i386.AMOVSD {
+ if regtyp(&p.From) != 0 {
+ if regtyp(&p.To) != 0 {
+ p.As = i386.AMOVAPD
+ }
+ }
+ }
+ }
+
+ gc.Flowend(g)
+}
+
+func excise(r *gc.Flow) {
+ var p *obj.Prog
+
+ p = r.Prog
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("%v ===delete===\n", p)
+ }
+
+ obj.Nopout(p)
+
+ gc.Ostats.Ndelmov++
+}
+
+func regtyp(a *obj.Addr) int {
+ return bool2int(a.Type == obj.TYPE_REG && (i386.REG_AX <= a.Reg && a.Reg <= i386.REG_DI || i386.REG_X0 <= a.Reg && a.Reg <= i386.REG_X7))
+}
+
+// movb elimination.
+// movb is simulated by the linker
+// when a register other than ax, bx, cx, dx
+// is used, so rewrite to other instructions
+// when possible. a movb into a register
+// can smash the entire 32-bit register without
+// causing any trouble.
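+//
+// For example, MOVB $1, SI has no byte encoding (SI has no 8-bit
+// form), but MOVL $1, SI has the same effect on the low byte and
+// needs no simulation.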
+func elimshortmov(g *gc.Graph) {
+ var p *obj.Prog
+ var r *gc.Flow
+
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ if regtyp(&p.To) != 0 {
+ switch p.As {
+ case i386.AINCB,
+ i386.AINCW:
+ p.As = i386.AINCL
+
+ case i386.ADECB,
+ i386.ADECW:
+ p.As = i386.ADECL
+
+ case i386.ANEGB,
+ i386.ANEGW:
+ p.As = i386.ANEGL
+
+ case i386.ANOTB,
+ i386.ANOTW:
+ p.As = i386.ANOTL
+ }
+
+ if regtyp(&p.From) != 0 || p.From.Type == obj.TYPE_CONST {
+ // a move or arithmetic into a partial register
+ // from another register or constant can be rewritten as movl.
+ // we don't switch to 32-bit arithmetic if it can
+ // change how the carry bit is set (and the carry bit is needed).
+ switch p.As {
+ case i386.AMOVB,
+ i386.AMOVW:
+ p.As = i386.AMOVL
+
+ case i386.AADDB,
+ i386.AADDW:
+ if !(needc(p.Link) != 0) {
+ p.As = i386.AADDL
+ }
+
+ case i386.ASUBB,
+ i386.ASUBW:
+ if !(needc(p.Link) != 0) {
+ p.As = i386.ASUBL
+ }
+
+ case i386.AMULB,
+ i386.AMULW:
+ p.As = i386.AMULL
+
+ case i386.AIMULB,
+ i386.AIMULW:
+ p.As = i386.AIMULL
+
+ case i386.AANDB,
+ i386.AANDW:
+ p.As = i386.AANDL
+
+ case i386.AORB,
+ i386.AORW:
+ p.As = i386.AORL
+
+ case i386.AXORB,
+ i386.AXORW:
+ p.As = i386.AXORL
+
+ case i386.ASHLB,
+ i386.ASHLW:
+ p.As = i386.ASHLL
+ }
+ } else {
+ // explicit zero extension
+ switch p.As {
+ case i386.AMOVB:
+ p.As = i386.AMOVBLZX
+
+ case i386.AMOVW:
+ p.As = i386.AMOVWLZX
+ }
+ }
+ }
+ }
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ * MOV a, R0
+ * ADD b, R0 / no use of R1
+ * MOV R0, R1
+ * would be converted to
+ * MOV a, R1
+ * ADD b, R1
+ * MOV R1, R0
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ */
+func subprop(r0 *gc.Flow) int {
+ var p *obj.Prog
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+ var r *gc.Flow
+ var t int
+ var info gc.ProgInfo
+
+ p = r0.Prog
+ v1 = &p.From
+ if !(regtyp(v1) != 0) {
+ return 0
+ }
+ v2 = &p.To
+ if !(regtyp(v2) != 0) {
+ return 0
+ }
+ for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\t? %v\n", r.Prog)
+ }
+ if gc.Uniqs(r) == nil {
+ break
+ }
+ p = r.Prog
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+ proginfo(&info, p)
+ if info.Flags&gc.Call != 0 {
+ return 0
+ }
+
+ if info.Reguse|info.Regset != 0 {
+ return 0
+ }
+
+ if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
+ goto gotit
+ }
+
+ if copyau(&p.From, v2) != 0 || copyau(&p.To, v2) != 0 {
+ break
+ }
+ if copysub(&p.From, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+ break
+ }
+ }
+
+ return 0
+
+gotit:
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t = int(v1.Reg)
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return 1
+}
+
+/*
+ * The idea is to remove redundant copies.
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * use v2 return fail
+ * -----------------
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * set v2 return success
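+ *
+ * That is, after the copy v1->v2, each use of v2 is rewritten to v1
+ * until v1 is redefined (F=1); if v2 is itself redefined before any
+ * later use, the original copy is redundant and can be excised.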
+ */
+func copyprop(g *gc.Graph, r0 *gc.Flow) int {
+ var p *obj.Prog
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+
+ p = r0.Prog
+ v1 = &p.From
+ v2 = &p.To
+ if copyas(v1, v2) != 0 {
+ return 1
+ }
+ gactive++
+ return copy1(v1, v2, r0.S1, 0)
+}
+
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+ var t int
+ var p *obj.Prog
+
+ if uint32(r.Active) == gactive {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("act set; return 1\n")
+ }
+ return 1
+ }
+
+ r.Active = int32(gactive)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
+ }
+ for ; r != nil; r = r.S1 {
+ p = r.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ if !(f != 0) && gc.Uniqp(r) == nil {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; merge; f=%d", f)
+ }
+ }
+
+ t = copyu(p, v2, nil)
+ switch t {
+ case 2: /* rar, can't split */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+ }
+ return 0
+
+ case 3: /* set */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return 1
+
+ case 1, /* used, substitute */
+ 4: /* use and set */
+ if f != 0 {
+ if !(gc.Debug['P'] != 0) {
+ return 0
+ }
+ if t == 4 {
+ fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ } else {
+ fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ }
+ return 0
+ }
+
+ if copyu(p, v2, v1) != 0 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub fail; return 0\n")
+ }
+ return 0
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub %v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
+ }
+ if t == 4 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return 1
+ }
+ }
+
+ if !(f != 0) {
+ t = copyu(p, v1, nil)
+ if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+ }
+ }
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n")
+ }
+ if r.S2 != nil {
+ if !(copy1(v1, v2, r.S2, f) != 0) {
+ return 0
+ }
+ }
+ }
+
+ return 1
+}
+
+/*
+ * return
+ * 1 if v only used (and substitute),
+ * 2 if read-alter-rewrite
+ * 3 if set
+ * 4 if set and used
+ * 0 otherwise (not touched)
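+ *
+ * For example, for p = MOVL AX, BX, copyu(p, AX, nil) returns 1
+ * (AX is only read) and copyu(p, BX, nil) returns 3 (BX is set
+ * without being used).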
+ */
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+ var info gc.ProgInfo
+
+ switch p.As {
+ case obj.AJMP:
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) != 0 {
+ return 1
+ }
+ return 0
+
+ case obj.ARET:
+ if s != nil {
+ return 1
+ }
+ return 3
+
+ case obj.ACALL:
+ if REGEXT != 0 && v.Type == obj.TYPE_REG && v.Reg <= REGEXT && v.Reg > exregoffset {
+ return 2
+ }
+ if i386.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == i386.REGARG {
+ return 2
+ }
+ if v.Type == p.From.Type && v.Reg == p.From.Reg {
+ return 2
+ }
+
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) != 0 {
+ return 4
+ }
+ return 3
+
+ case obj.ATEXT:
+ if i386.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == i386.REGARG {
+ return 3
+ }
+ return 0
+ }
+
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ return 0
+ }
+ proginfo(&info, p)
+
+ if (info.Reguse|info.Regset)&RtoB(int(v.Reg)) != 0 {
+ return 2
+ }
+
+ if info.Flags&gc.LeftAddr != 0 {
+ if copyas(&p.From, v) != 0 {
+ return 2
+ }
+ }
+
+ if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
+ if copyas(&p.To, v) != 0 {
+ return 2
+ }
+ }
+
+ if info.Flags&gc.RightWrite != 0 {
+ if copyas(&p.To, v) != 0 {
+ if s != nil {
+ return copysub(&p.From, v, s, 1)
+ }
+ if copyau(&p.From, v) != 0 {
+ return 4
+ }
+ return 3
+ }
+ }
+
+ if info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ return copysub(&p.To, v, s, 1)
+ }
+
+ if copyau(&p.From, v) != 0 {
+ return 1
+ }
+ if copyau(&p.To, v) != 0 {
+ return 1
+ }
+ }
+
+ return 0
+}
+
+/*
+ * direct reference,
+ * could be set/use depending on
+ * semantics
+ */
+func copyas(a *obj.Addr, v *obj.Addr) int {
+ if i386.REG_AL <= a.Reg && a.Reg <= i386.REG_BL {
+ gc.Fatal("use of byte register")
+ }
+ if i386.REG_AL <= v.Reg && v.Reg <= i386.REG_BL {
+ gc.Fatal("use of byte register")
+ }
+
+ if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+ return 0
+ }
+ if regtyp(v) != 0 {
+ return 1
+ }
+ if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+ if v.Offset == a.Offset {
+ return 1
+ }
+ }
+ return 0
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) int {
+ if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+ return 0
+ }
+ if regtyp(v) != 0 {
+ return 1
+ }
+ if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+ if v.Offset == a.Offset {
+ return 1
+ }
+ }
+ return 0
+}
+
+/*
+ * either direct or indirect
+ */
+func copyau(a *obj.Addr, v *obj.Addr) int {
+ if copyas(a, v) != 0 {
+ return 1
+ }
+ if regtyp(v) != 0 {
+ if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
+ return 1
+ }
+ if a.Index == v.Reg {
+ return 1
+ }
+ }
+
+ return 0
+}
+
+/*
+ * substitute s for v in a
+ * returns non-zero when the substitution is impossible
+ */
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+ var reg int
+
+ if copyas(a, v) != 0 {
+ reg = int(s.Reg)
+ if reg >= i386.REG_AX && reg <= i386.REG_DI || reg >= i386.REG_X0 && reg <= i386.REG_X7 {
+ if f != 0 {
+ a.Reg = int16(reg)
+ }
+ }
+
+ return 0
+ }
+
+ if regtyp(v) != 0 {
+ reg = int(v.Reg)
+ if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
+ if s.Reg == i386.REG_BP && a.Index != i386.REG_NONE {
+ return 1 /* can't use BP-base with index */
+ }
+ if f != 0 {
+ a.Reg = s.Reg
+ }
+ }
+
+ // return 0;
+ if int(a.Index) == reg {
+ if f != 0 {
+ a.Index = s.Reg
+ }
+ return 0
+ }
+
+ return 0
+ }
+
+ return 0
+}
+
+func conprop(r0 *gc.Flow) {
+ var r *gc.Flow
+ var p *obj.Prog
+ var p0 *obj.Prog
+ var t int
+ var v0 *obj.Addr
+
+ p0 = r0.Prog
+ v0 = &p0.To
+ r = r0
+
+loop:
+ r = gc.Uniqs(r)
+ if r == nil || r == r0 {
+ return
+ }
+ if gc.Uniqp(r) == nil {
+ return
+ }
+
+ p = r.Prog
+ t = copyu(p, v0, nil)
+ switch t {
+ case 0, // miss
+ 1: // use
+ goto loop
+
+ case 2, // rar
+ 4: // use and set
+ break
+
+ case 3: // set
+ if p.As == p0.As {
+ if p.From.Type == p0.From.Type {
+ if p.From.Reg == p0.From.Reg {
+ if p.From.Node == p0.From.Node {
+ if p.From.Offset == p0.From.Offset {
+ if p.From.Scale == p0.From.Scale {
+ if p.From.Type != obj.TYPE_FCONST || p.From.U.Dval == p0.From.U.Dval {
+ if p.From.Index == p0.From.Index {
+ excise(r)
+ goto loop
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) int {
+ return bool2int(regtyp(reg) != 0 && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == i386.REG_NONE && 0 <= a.Offset && a.Offset < 4096)
+}
+
+func stackaddr(a *obj.Addr) int {
+ return bool2int(a.Type == obj.TYPE_REG && a.Reg == i386.REG_SP)
+}
diff --git a/src/cmd/new8g/prog.go b/src/cmd/new8g/prog.go
new file mode 100644
index 0000000000..d8e46e5108
--- /dev/null
+++ b/src/cmd/new8g/prog.go
@@ -0,0 +1,291 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+)
+import "cmd/internal/gc"
+
+var (
+ AX = RtoB(i386.REG_AX)
+ BX = RtoB(i386.REG_BX)
+ CX = RtoB(i386.REG_CX)
+ DX = RtoB(i386.REG_DX)
+ DI = RtoB(i386.REG_DI)
+ SI = RtoB(i386.REG_SI)
+ LeftRdwr uint32 = gc.LeftRead | gc.LeftWrite
+ RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about the instructions
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
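+//
+// For example, the i386.AADDL entry below reads as: a 32-bit
+// operation (SizeL) that reads its left operand, reads and writes
+// its right operand (RightRdwr), and sets the carry flag.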
+var progtable = [i386.ALAST]gc.ProgInfo{
+ obj.ATYPE: gc.ProgInfo{gc.Pseudo | gc.Skip, 0, 0, 0},
+ obj.ATEXT: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AFUNCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.APCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AUNDEF: gc.ProgInfo{gc.Break, 0, 0, 0},
+ obj.AUSEFIELD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ obj.ACHECKNIL: gc.ProgInfo{gc.LeftRead, 0, 0, 0},
+ obj.AVARDEF: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+ obj.AVARKILL: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+ // NOP is an internal no-op that also stands
+ // for USED and SET annotations, not the Intel opcode.
+ obj.ANOP: gc.ProgInfo{gc.LeftRead | gc.RightWrite, 0, 0, 0},
+ i386.AADCL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.AADCW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.AADDB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AADDL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AADDW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AADDSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.AADDSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.AANDB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AANDL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AANDW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ obj.ACALL: gc.ProgInfo{gc.RightAddr | gc.Call | gc.KillCarry, 0, 0, 0},
+ i386.ACDQ: gc.ProgInfo{gc.OK, AX, AX | DX, 0},
+ i386.ACWD: gc.ProgInfo{gc.OK, AX, AX | DX, 0},
+ i386.ACLD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ i386.ASTD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ i386.ACMPB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ACMPL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ACMPW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ACOMISD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ACOMISS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ACVTSD2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTSD2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTSL2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTSL2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTSS2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTSS2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTTSD2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTTSS2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ADECB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ i386.ADECL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ i386.ADECW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ i386.ADIVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ i386.ADIVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ i386.ADIVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ i386.ADIVSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.ADIVSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.AFLDCW: gc.ProgInfo{gc.SizeW | gc.LeftAddr, 0, 0, 0},
+ i386.AFSTCW: gc.ProgInfo{gc.SizeW | gc.RightAddr, 0, 0, 0},
+ i386.AFSTSW: gc.ProgInfo{gc.SizeW | gc.RightAddr | gc.RightWrite, 0, 0, 0},
+ i386.AFADDD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFADDDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFADDF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFCOMD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFCOMDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFCOMDPP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFCOMF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFCOMFP: gc.ProgInfo{gc.SizeF | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFUCOMIP: gc.ProgInfo{gc.SizeF | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFCHS: gc.ProgInfo{gc.SizeD | RightRdwr, 0, 0, 0}, // also SizeF
+
+ i386.AFDIVDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFDIVF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFDIVD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFDIVRDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFDIVRF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFDIVRD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFXCHD: gc.ProgInfo{gc.SizeD | LeftRdwr | RightRdwr, 0, 0, 0},
+ i386.AFSUBD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFSUBDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFSUBF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFSUBRD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFSUBRDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFSUBRF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFMOVD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ i386.AFMOVF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ i386.AFMOVL: gc.ProgInfo{gc.SizeL | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ i386.AFMOVW: gc.ProgInfo{gc.SizeW | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ i386.AFMOVV: gc.ProgInfo{gc.SizeQ | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+
+ // These instructions are marked as RightAddr
+ // so that the register optimizer does not try to replace the
+ // memory references with integer register references.
+ // But they do not use the previous value at the address, so
+ // we also mark them RightWrite.
+ i386.AFMOVDP: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+ i386.AFMOVFP: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+ i386.AFMOVLP: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+ i386.AFMOVWP: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+ i386.AFMOVVP: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+ i386.AFMULD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFMULDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFMULF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AIDIVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ i386.AIDIVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ i386.AIDIVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ i386.AIMULB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ i386.AIMULL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+ i386.AIMULW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+ i386.AINCB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ i386.AINCL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ i386.AINCW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ i386.AJCC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJCS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJEQ: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJGE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJGT: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJHI: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJLE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJLS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJLT: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJMI: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJNE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJOC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJOS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJPC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJPL: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJPS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ obj.AJMP: gc.ProgInfo{gc.Jump | gc.Break | gc.KillCarry, 0, 0, 0},
+ i386.ALEAL: gc.ProgInfo{gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ i386.AMOVBLSX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVBLZX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVBWSX: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVBWZX: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVWLSX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVWLZX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ i386.AMOVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ i386.AMOVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ i386.AMOVSB: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ i386.AMOVSL: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ i386.AMOVSW: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ obj.ADUFFCOPY: gc.ProgInfo{gc.OK, DI | SI, DI | SI | CX, 0},
+ i386.AMOVSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ i386.AMOVSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+ // We use MOVAPD as a faster synonym for MOVSD.
+ i386.AMOVAPD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ i386.AMULB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ i386.AMULL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+ i386.AMULW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+ i386.AMULSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.AMULSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.ANEGB: gc.ProgInfo{gc.SizeB | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ANEGL: gc.ProgInfo{gc.SizeL | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ANEGW: gc.ProgInfo{gc.SizeW | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ANOTB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ i386.ANOTL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ i386.ANOTW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ i386.AORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.APOPL: gc.ProgInfo{gc.SizeL | gc.RightWrite, 0, 0, 0},
+ i386.APUSHL: gc.ProgInfo{gc.SizeL | gc.LeftRead, 0, 0, 0},
+ i386.ARCLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ARCLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ARCLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ARCRB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ARCRL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ARCRW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.AREP: gc.ProgInfo{gc.OK, CX, CX, 0},
+ i386.AREPN: gc.ProgInfo{gc.OK, CX, CX, 0},
+ obj.ARET: gc.ProgInfo{gc.Break | gc.KillCarry, 0, 0, 0},
+ i386.AROLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.AROLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.AROLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ARORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ARORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ARORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASAHF: gc.ProgInfo{gc.OK, AX, AX, 0},
+ i386.ASALB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASALL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASALW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASARB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASARL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASARW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASBBB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ASBBL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ASBBW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ASETCC: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETCS: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETEQ: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETGE: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETGT: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETHI: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETLE: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETLS: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETLT: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETMI: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETNE: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETOC: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETOS: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETPC: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETPL: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETPS: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASHLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASHLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASHLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASHRB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASHRL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASHRW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASTOSB: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ i386.ASTOSL: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ i386.ASTOSW: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ obj.ADUFFZERO: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ i386.ASUBB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ASUBL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ASUBW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ASUBSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.ASUBSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.ATESTB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ATESTL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ATESTW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.AUCOMISD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ i386.AUCOMISS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ i386.AXCHGB: gc.ProgInfo{gc.SizeB | LeftRdwr | RightRdwr, 0, 0, 0},
+ i386.AXCHGL: gc.ProgInfo{gc.SizeL | LeftRdwr | RightRdwr, 0, 0, 0},
+ i386.AXCHGW: gc.ProgInfo{gc.SizeW | LeftRdwr | RightRdwr, 0, 0, 0},
+ i386.AXORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AXORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AXORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+}
+
+func proginfo(info *gc.ProgInfo, p *obj.Prog) {
+ *info = progtable[p.As]
+ if info.Flags == 0 {
+ gc.Fatal("unknown instruction %v", p)
+ }
+
+ if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
+ info.Reguse |= CX
+ }
+
+ if info.Flags&gc.ImulAXDX != 0 {
+ if p.To.Type == obj.TYPE_NONE {
+ info.Reguse |= AX
+ info.Regset |= AX | DX
+ } else {
+ info.Flags |= RightRdwr
+ }
+ }
+
+ // Addressing makes some registers used.
+ if p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_NONE {
+ info.Regindex |= RtoB(int(p.From.Reg))
+ }
+ if p.From.Index != i386.REG_NONE {
+ info.Regindex |= RtoB(int(p.From.Index))
+ }
+ if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE {
+ info.Regindex |= RtoB(int(p.To.Reg))
+ }
+ if p.To.Index != i386.REG_NONE {
+ info.Regindex |= RtoB(int(p.To.Index))
+ }
+}
diff --git a/src/cmd/new8g/reg.go b/src/cmd/new8g/reg.go
new file mode 100644
index 0000000000..76bd260f54
--- /dev/null
+++ b/src/cmd/new8g/reg.go
@@ -0,0 +1,112 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import "cmd/internal/obj/i386"
+import "cmd/internal/gc"
+
+const (
+ NREGVAR = 16
+)
+
+var regname = []string{
+ ".ax",
+ ".cx",
+ ".dx",
+ ".bx",
+ ".sp",
+ ".bp",
+ ".si",
+ ".di",
+ ".x0",
+ ".x1",
+ ".x2",
+ ".x3",
+ ".x4",
+ ".x5",
+ ".x6",
+ ".x7",
+}
+
+func regnames(n *int) []string {
+ *n = NREGVAR
+ return regname
+}
+
+func excludedregs() uint64 {
+ return RtoB(i386.REG_SP)
+}
+
+func doregbits(r int) uint64 {
+ var b uint64
+
+ b = 0
+ if r >= i386.REG_AX && r <= i386.REG_DI {
+ b |= RtoB(r)
+ } else if r >= i386.REG_AL && r <= i386.REG_BL {
+ b |= RtoB(r - i386.REG_AL + i386.REG_AX)
+ } else if r >= i386.REG_AH && r <= i386.REG_BH {
+ b |= RtoB(r - i386.REG_AH + i386.REG_AX)
+ } else if r >= i386.REG_X0 && r <= i386.REG_X0+7 {
+ b |= FtoB(r)
+ }
+ return b
+}
+
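+// RtoB, BtoR, FtoB, and BtoF translate between registers and bits in
+// a register mask: AX through DI occupy bits 0-7 and X0 through X7
+// occupy bits 8-15, matching the order of regname above.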
+func RtoB(r int) uint64 {
+ if r < i386.REG_AX || r > i386.REG_DI {
+ return 0
+ }
+ return 1 << uint(r-i386.REG_AX)
+}
+
+func BtoR(b uint64) int {
+ b &= 0xff
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + i386.REG_AX
+}
+
+func FtoB(f int) uint64 {
+ if f < i386.REG_X0 || f > i386.REG_X7 {
+ return 0
+ }
+ return 1 << uint(f-i386.REG_X0+8)
+}
+
+func BtoF(b uint64) int {
+ b &= 0xFF00
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) - 8 + i386.REG_X0
+}
diff --git a/src/cmd/new8g/util.go b/src/cmd/new8g/util.go
new file mode 100644
index 0000000000..bb5eedb15a
--- /dev/null
+++ b/src/cmd/new8g/util.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func bool2int(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
diff --git a/src/cmd/new9g/cgen.go b/src/cmd/new9g/cgen.go
new file mode 100644
index 0000000000..84ee97f741
--- /dev/null
+++ b/src/cmd/new9g/cgen.go
@@ -0,0 +1,1889 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+/*
+ * generate:
+ * res = n;
+ * simplifies and calls gmove.
+ */
+func cgen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var a int
+ var f int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var addr obj.Addr
+
+ //print("cgen %N(%d) -> %N(%d)\n", n, n->addable, res, res->addable);
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\ncgen-n", n)
+ gc.Dump("cgen-res", res)
+ }
+
+ if n == nil || n.Type == nil {
+ goto ret
+ }
+
+ if res == nil || res.Type == nil {
+ gc.Fatal("cgen: res nil")
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ switch n.Op {
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ if res.Op != gc.ONAME || !(res.Addable != 0) {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_slice(n, res)
+ }
+ goto ret
+
+ case gc.OEFACE:
+ if res.Op != gc.ONAME || !(res.Addable != 0) {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_eface(n, res)
+ }
+ goto ret
+ }
+
+ if n.Ullman >= gc.UINF {
+ if n.Op == gc.OINDREG {
+ gc.Fatal("cgen: this is going to misscompile")
+ }
+ if res.Ullman >= gc.UINF {
+ gc.Tempname(&n1, n.Type)
+ cgen(n, &n1)
+ cgen(&n1, res)
+ goto ret
+ }
+ }
+
+ if gc.Isfat(n.Type) != 0 {
+ if n.Type.Width < 0 {
+ gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
+ }
+ sgen(n, res, n.Type.Width)
+ goto ret
+ }
+
+ if !(res.Addable != 0) {
+ if n.Ullman > res.Ullman {
+ regalloc(&n1, n.Type, res)
+ cgen(n, &n1)
+ if n1.Ullman > res.Ullman {
+ gc.Dump("n1", &n1)
+ gc.Dump("res", res)
+ gc.Fatal("loop in cgen")
+ }
+
+ cgen(&n1, res)
+ regfree(&n1)
+ goto ret
+ }
+
+ if res.Ullman >= gc.UINF {
+ goto gen
+ }
+
+ if gc.Complexop(n, res) != 0 {
+ gc.Complexgen(n, res)
+ goto ret
+ }
+
+ f = 1 // gen thru register
+ switch n.Op {
+ case gc.OLITERAL:
+ if gc.Smallintconst(n) != 0 {
+ f = 0
+ }
+
+ case gc.OREGISTER:
+ f = 0
+ }
+
+ if !(gc.Iscomplex[n.Type.Etype] != 0) {
+ a = optoas(gc.OAS, res.Type)
+ if sudoaddable(a, res, &addr) != 0 {
+ if f != 0 {
+ regalloc(&n2, res.Type, nil)
+ cgen(n, &n2)
+ p1 = gins(a, &n2, nil)
+ regfree(&n2)
+ } else {
+ p1 = gins(a, n, nil)
+ }
+ p1.To = addr
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ sudoclean()
+ goto ret
+ }
+ }
+
+ gen:
+ igen(res, &n1, nil)
+ cgen(n, &n1)
+ regfree(&n1)
+ goto ret
+ }
+
+ // update addressability for string, slice
+ // can't do in walk because n->left->addable
+ // changes if n->left is an escaping local variable.
+ switch n.Op {
+ case gc.OSPTR,
+ gc.OLEN:
+ if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OCAP:
+ if gc.Isslice(n.Left.Type) != 0 {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OITAB:
+ n.Addable = n.Left.Addable
+ }
+
+ if gc.Complexop(n, res) != 0 {
+ gc.Complexgen(n, res)
+ goto ret
+ }
+
+ // if both are addressable, move
+ if n.Addable != 0 {
+ if n.Op == gc.OREGISTER || res.Op == gc.OREGISTER {
+ gmove(n, res)
+ } else {
+ regalloc(&n1, n.Type, nil)
+ gmove(n, &n1)
+ cgen(&n1, res)
+ regfree(&n1)
+ }
+
+ goto ret
+ }
+
+ nl = n.Left
+ nr = n.Right
+
+ if nl != nil && nl.Ullman >= gc.UINF {
+ if nr != nil && nr.Ullman >= gc.UINF {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ n2 = *n
+ n2.Left = &n1
+ cgen(&n2, res)
+ goto ret
+ }
+ }
+
+ if !(gc.Iscomplex[n.Type.Etype] != 0) {
+ a = optoas(gc.OAS, n.Type)
+ if sudoaddable(a, n, &addr) != 0 {
+ if res.Op == gc.OREGISTER {
+ p1 = gins(a, nil, res)
+ p1.From = addr
+ } else {
+ regalloc(&n2, n.Type, nil)
+ p1 = gins(a, nil, &n2)
+ p1.From = addr
+ gins(a, &n2, res)
+ regfree(&n2)
+ }
+
+ sudoclean()
+ goto ret
+ }
+ }
+
+ // TODO(minux): we shouldn't reverse FP comparisons, but then we need to synthesize
+ // OGE, OLE, and ONE ourselves.
+ // if(nl != N && isfloat[n->type->etype] && isfloat[nl->type->etype]) goto flt;
+
+ switch n.Op {
+ default:
+ gc.Dump("cgen", n)
+ gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ // these call bgen to get a bool value
+ case gc.OOROR,
+ gc.OANDAND,
+ gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OLE,
+ gc.OGE,
+ gc.OGT,
+ gc.ONOT:
+ p1 = gc.Gbranch(ppc64.ABR, nil, 0)
+
+ p2 = gc.Pc
+ gmove(gc.Nodbool(1), res)
+ p3 = gc.Gbranch(ppc64.ABR, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n, true, 0, p2)
+ gmove(gc.Nodbool(0), res)
+ gc.Patch(p3, gc.Pc)
+ goto ret
+
+ case gc.OPLUS:
+ cgen(nl, res)
+ goto ret
+
+ // unary
+ case gc.OCOM:
+ a = optoas(gc.OXOR, nl.Type)
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ gc.Nodconst(&n2, nl.Type, -1)
+ gins(a, &n2, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ goto ret
+
+ case gc.OMINUS:
+ if gc.Isfloat[nl.Type.Etype] != 0 {
+ nr = gc.Nodintconst(-1)
+ gc.Convlit(&nr, n.Type)
+ a = optoas(gc.OMUL, nl.Type)
+ goto sbop
+ }
+
+ a = optoas(int(n.Op), nl.Type)
+ goto uop
+
+ // symmetric binary
+ case gc.OAND,
+ gc.OOR,
+ gc.OXOR,
+ gc.OADD,
+ gc.OMUL:
+ a = optoas(int(n.Op), nl.Type)
+
+ goto sbop
+
+ // asymmetric binary
+ case gc.OSUB:
+ a = optoas(int(n.Op), nl.Type)
+
+ goto abop
+
+ case gc.OHMUL:
+ cgen_hmul(nl, nr, res)
+
+ case gc.OCONV:
+ if n.Type.Width > nl.Type.Width {
+ // If loading from memory, do conversion during load,
+ // so as to avoid use of 8-bit register in, say, int(*byteptr).
+ switch nl.Op {
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OIND,
+ gc.ONAME:
+ igen(nl, &n1, res)
+ regalloc(&n2, n.Type, res)
+ gmove(&n1, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ regfree(&n1)
+ goto ret
+ }
+ }
+
+ regalloc(&n1, nl.Type, res)
+ regalloc(&n2, n.Type, &n1)
+ cgen(nl, &n1)
+
+ // if we do the conversion n1 -> n2 here
+ // reusing the register, then gmove won't
+ // have to allocate its own register.
+ gmove(&n1, &n2)
+
+ gmove(&n2, res)
+ regfree(&n2)
+ regfree(&n1)
+
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OIND,
+ gc.ONAME: // PHEAP or PPARAMREF var
+ igen(n, &n1, res)
+
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // interface table is first word of interface value
+ case gc.OITAB:
+ igen(nl, &n1, res)
+
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // pointer is the first word of string or slice.
+ case gc.OSPTR:
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+ p1 = gins(ppc64.AMOVD, nil, &n1)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ igen(nl, &n1, res)
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ case gc.OLEN:
+ if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ // map and chan have len in the first int-sized word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.Simtype[gc.TINT]]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+ // both slice and string have len one pointer into the struct.
+ // a zero pointer means zero length
+ igen(nl, &n1, res)
+
+ n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ n1.Xoffset += int64(gc.Array_nel)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OCAP:
+ if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ // chan has cap in the second int-sized word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Xoffset = int64(gc.Widthint)
+ n2.Type = gc.Types[gc.Simtype[gc.TINT]]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isslice(nl.Type) != 0 {
+ igen(nl, &n1, res)
+ n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ n1.Xoffset += int64(gc.Array_cap)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OADDR:
+ if n.Bounded != 0 { // let race detector avoid nil checks
+ gc.Disable_checknil++
+ }
+ agen(nl, res)
+ if n.Bounded != 0 {
+ gc.Disable_checknil--
+ }
+
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+ cgen_callret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_callret(n, res)
+
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+ cgen_callret(n, res)
+
+ case gc.OMOD,
+ gc.ODIV:
+ if gc.Isfloat[n.Type.Etype] != 0 {
+ a = optoas(int(n.Op), nl.Type)
+ goto abop
+ }
+
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+ cgen_div(int(n.Op), &n1, nr, res)
+ regfree(&n1)
+ } else {
+ if !(gc.Smallintconst(nr) != 0) {
+ regalloc(&n2, nr.Type, res)
+ cgen(nr, &n2)
+ } else {
+ n2 = *nr
+ }
+
+ cgen_div(int(n.Op), nl, &n2, res)
+ if n2.Op != gc.OLITERAL {
+ regfree(&n2)
+ }
+ }
+
+ case gc.OLSH,
+ gc.ORSH,
+ gc.OLROT:
+ cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+ }
+
+ goto ret
+
+ /*
+ * put simplest on right - we'll generate into left
+ * and then adjust it using the computation of right.
+ * constants and variables have the same ullman
+ * count, so look for constants specially.
+ *
+ * an integer constant we can use as an immediate
+ * is simpler than a variable - we can use the immediate
+ * in the adjustment instruction directly - so it goes
+ * on the right.
+ *
+ * other constants, like big integers or floating point
+ * constants, require a mov into a register, so those
+ * might as well go on the left, so we can reuse that
+ * register for the computation.
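+ *
+ * For example, x+3 keeps the constant on the right, while 3+x is
+ * swapped so that the constant again ends up on the right.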
+ */
+sbop: // symmetric binary
+ if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) != 0 || (nr.Op == gc.OLITERAL && !(gc.Smallintconst(nr) != 0)))) {
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+abop: // asymmetric binary
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+
+ /*
+ * This generates smaller code - it avoids a MOV - but it's
+ * easily 10% slower due to not being able to
+ * optimize/manipulate the move.
+ * To see, run: go test -bench . crypto/md5
+ * with and without.
+ *
+ if(sudoaddable(a, nr, &addr)) {
+ p1 = gins(a, N, &n1);
+ p1->from = addr;
+ gmove(&n1, res);
+ sudoclean();
+ regfree(&n1);
+ goto ret;
+ }
+ *
+ */
+ // TODO(minux): enable using constants directly in certain instructions.
+ //if(smallintconst(nr))
+ // n2 = *nr;
+ //else {
+ regalloc(&n2, nr.Type, nil)
+
+ cgen(nr, &n2)
+ //}
+ } else {
+ //if(smallintconst(nr))
+ // n2 = *nr;
+ //else {
+ regalloc(&n2, nr.Type, res)
+
+ cgen(nr, &n2)
+
+ //}
+ regalloc(&n1, nl.Type, nil)
+
+ cgen(nl, &n1)
+ }
+
+ gins(a, &n2, &n1)
+
+ // Normalize result for types smaller than word.
+ if n.Type.Width < int64(gc.Widthreg) {
+ switch n.Op {
+ case gc.OADD,
+ gc.OSUB,
+ gc.OMUL,
+ gc.OLSH:
+ gins(optoas(gc.OAS, n.Type), &n1, &n1)
+ }
+ }
+
+ gmove(&n1, res)
+ regfree(&n1)
+ if n2.Op != gc.OLITERAL {
+ regfree(&n2)
+ }
+ goto ret
+
+uop: // unary
+ regalloc(&n1, nl.Type, res)
+
+ cgen(nl, &n1)
+ gins(a, nil, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ goto ret
+
+ret:
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ * a = n
+ * The caller must call regfree(a).
+ */
+func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("cgenr-n", n)
+ }
+
+ if gc.Isfat(n.Type) != 0 {
+ gc.Fatal("cgenr on fat node")
+ }
+
+ if n.Addable != 0 {
+ regalloc(a, n.Type, res)
+ gmove(n, a)
+ return
+ }
+
+ switch n.Op {
+ case gc.ONAME,
+ gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ igen(n, &n1, res)
+ regalloc(a, gc.Types[gc.Tptr], &n1)
+ gmove(&n1, a)
+ regfree(&n1)
+
+ default:
+ regalloc(a, n.Type, res)
+ cgen(n, a)
+ }
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ * a = &n
+ * The caller must call regfree(a).
+ * The generated code checks that the result is not nil.
+ */
+func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var n4 gc.Node
+ var tmp gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var w uint32
+ var v uint64
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("agenr-n", n)
+ }
+
+ nl = n.Left
+ nr = n.Right
+
+ switch n.Op {
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ igen(n, &n1, res)
+ regalloc(a, gc.Types[gc.Tptr], &n1)
+ agen(&n1, a)
+ regfree(&n1)
+
+ case gc.OIND:
+ cgenr(n.Left, a, res)
+ gc.Cgen_checknil(a)
+
+ case gc.OINDEX:
+ p2 = nil // to be patched to panicindex.
+ w = uint32(n.Type.Width)
+
+ //bounded = debug['B'] || n->bounded;
+ if nr.Addable != 0 {
+ if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ gc.Tempname(&tmp, gc.Types[gc.TINT64])
+ }
+ if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ agenr(nl, &n3, res)
+ }
+ if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ cgen(nr, &tmp)
+ regalloc(&n1, tmp.Type, nil)
+ gmove(&tmp, &n1)
+ }
+ } else if nl.Addable != 0 {
+ if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ gc.Tempname(&tmp, gc.Types[gc.TINT64])
+ cgen(nr, &tmp)
+ regalloc(&n1, tmp.Type, nil)
+ gmove(&tmp, &n1)
+ }
+
+ if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ agenr(nl, &n3, res)
+ }
+ } else {
+ gc.Tempname(&tmp, gc.Types[gc.TINT64])
+ cgen(nr, &tmp)
+ nr = &tmp
+ if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ agenr(nl, &n3, res)
+ }
+ regalloc(&n1, tmp.Type, nil)
+ gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
+ }
+
+ // &a is in &n3 (allocated in res)
+ // i is in &n1 (if not constant)
+ // w is width
+
+ // constant index
+ if gc.Isconst(nr, gc.CTINT) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ gc.Fatal("constant string constant index")
+ }
+ v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_nel)
+ regalloc(&n4, n1.Type, nil)
+ gmove(&n1, &n4)
+ ginscon2(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n4, int64(v))
+ regfree(&n4)
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1)
+ ginscall(gc.Panicindex, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_array)
+ gmove(&n1, &n3)
+ }
+
+ if v*uint64(w) != 0 {
+ ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*uint64(w)), &n3)
+ }
+
+ *a = n3
+ break
+ }
+
+ regalloc(&n2, gc.Types[gc.TINT64], &n1) // i
+ gmove(&n1, &n2)
+ regfree(&n1)
+
+ if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ // check bounds
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ gc.Nodconst(&n4, gc.Types[gc.TUINT64], int64(len(nl.Val.U.Sval.S)))
+ } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_nel)
+ regalloc(&n4, gc.Types[gc.TUINT64], nil)
+ gmove(&n1, &n4)
+ } else {
+ if nl.Type.Bound < (1<<15)-1 {
+ gc.Nodconst(&n4, gc.Types[gc.TUINT64], nl.Type.Bound)
+ } else {
+ regalloc(&n4, gc.Types[gc.TUINT64], nil)
+ p1 = gins(ppc64.AMOVD, nil, &n4)
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = nl.Type.Bound
+ }
+ }
+
+ gins(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n2, &n4)
+ if n4.Op == gc.OREGISTER {
+ regfree(&n4)
+ }
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
+ if p2 != nil {
+ gc.Patch(p2, gc.Pc)
+ }
+ ginscall(gc.Panicindex, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if gc.Isconst(nl, gc.CTSTR) != 0 {
+ regalloc(&n3, gc.Types[gc.Tptr], res)
+ p1 = gins(ppc64.AMOVD, nil, &n3)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ p1.From.Type = obj.TYPE_ADDR
+ } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_array)
+ gmove(&n1, &n3)
+ }
+
+ if w == 0 {
+ // nothing to do
+ } else if w == 1 {
+ /* w already scaled */
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ /* else if(w == 2 || w == 4 || w == 8) {
+ // TODO(minux): scale using shift
+ } */
+ } else {
+ regalloc(&n4, gc.Types[gc.TUINT64], nil)
+ gc.Nodconst(&n1, gc.Types[gc.TUINT64], int64(w))
+ gmove(&n1, &n4)
+ gins(optoas(gc.OMUL, gc.Types[gc.TUINT64]), &n4, &n2)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ regfree(&n4)
+ }
+
+ *a = n3
+ regfree(&n2)
+
+ default:
+ regalloc(a, gc.Types[gc.Tptr], res)
+ agen(n, a)
+ }
+}
+
+func ginsadd(as int, off int64, dst *gc.Node) {
+ var n1 gc.Node
+
+ regalloc(&n1, gc.Types[gc.Tptr], dst)
+ gmove(dst, &n1)
+ ginscon(as, off, &n1)
+ gmove(&n1, dst)
+ regfree(&n1)
+}
+
+/*
+ * generate:
+ * res = &n;
+ * The generated code checks that the result is not nil.
+ */
+func agen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nagen-res", res)
+ gc.Dump("agen-r", n)
+ }
+
+ if n == nil || n.Type == nil {
+ return
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+ // Use of a nil interface or nil slice.
+ // Create a temporary we can take the address of and read.
+ // The generated code is just going to panic, so it need not
+ // be terribly efficient. See issue 3670.
+ gc.Tempname(&n1, n.Type)
+
+ gc.Gvardef(&n1)
+ clearfat(&n1)
+ regalloc(&n2, gc.Types[gc.Tptr], res)
+ n3 = gc.Node{}
+ n3.Op = gc.OADDR
+ n3.Left = &n1
+ gins(ppc64.AMOVD, &n3, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ goto ret
+ }
+
+ if n.Addable != 0 {
+ n1 = gc.Node{}
+ n1.Op = gc.OADDR
+ n1.Left = n
+ regalloc(&n2, gc.Types[gc.Tptr], res)
+ gins(ppc64.AMOVD, &n1, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ goto ret
+ }
+
+ nl = n.Left
+
+ switch n.Op {
+ default:
+ gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ // TODO(minux): 5g has this: Release res so that it is available for cgen_call.
+ // Pick it up again after the call for OCALLMETH and OCALLFUNC.
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+
+ cgen_aret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_aret(n, res)
+
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+ cgen_aret(n, res)
+
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ agen(&n1, res)
+
+ case gc.OEFACE:
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ agen(&n1, res)
+
+ case gc.OINDEX:
+ agenr(n, &n1, res)
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // should only get here with names in this func.
+ case gc.ONAME:
+ if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
+ gc.Dump("bad agen", n)
+ gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
+ }
+
+ // should only get here for heap vars or paramref
+ if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+ gc.Dump("bad agen", n)
+ gc.Fatal("agen: bad ONAME class %#x", n.Class)
+ }
+
+ cgen(n.Heapaddr, res)
+ if n.Xoffset != 0 {
+ ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+ }
+
+ case gc.OIND:
+ cgen(nl, res)
+ gc.Cgen_checknil(res)
+
+ case gc.ODOT:
+ agen(nl, res)
+ if n.Xoffset != 0 {
+ ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+ }
+
+ case gc.ODOTPTR:
+ cgen(nl, res)
+ gc.Cgen_checknil(res)
+ if n.Xoffset != 0 {
+ ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+ }
+ }
+
+ret:
+}
+
+/*
+ * generate:
+ * newreg = &n;
+ * res = newreg
+ *
+ * on exit, a has been changed to be *newreg.
+ * caller must regfree(a).
+ * The generated code checks that the result is not *nil.
+ */
+func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var fp *gc.Type
+ var flist gc.Iter
+ var n1 gc.Node
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nigen-n", n)
+ }
+
+ switch n.Op {
+ case gc.ONAME:
+ if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
+ break
+ }
+ *a = *n
+ return
+
+ // Increase the refcount of the register so that igen's caller
+ // has to call regfree.
+ case gc.OINDREG:
+ if n.Val.U.Reg != ppc64.REGSP {
+ reg[n.Val.U.Reg]++
+ }
+ *a = *n
+ return
+
+ case gc.ODOT:
+ igen(n.Left, a, res)
+ a.Xoffset += n.Xoffset
+ a.Type = n.Type
+ fixlargeoffset(a)
+ return
+
+ case gc.ODOTPTR:
+ cgenr(n.Left, a, res)
+ gc.Cgen_checknil(a)
+ a.Op = gc.OINDREG
+ a.Xoffset += n.Xoffset
+ a.Type = n.Type
+ fixlargeoffset(a)
+ return
+
+ case gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ switch n.Op {
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, nil, 0)
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+ *a = gc.Node{}
+ a.Op = gc.OINDREG
+ a.Val.U.Reg = ppc64.REGSP
+ a.Addable = 1
+ a.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
+ a.Type = n.Type
+ return
+
+ // Index of fixed-size array by constant can
+ // put the offset in the addressing.
+ // Could do the same for slice except that we need
+ // to use the real index for the bounds checking.
+ case gc.OINDEX:
+ if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
+ if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ // Compute &a.
+ if !(gc.Isptr[n.Left.Type.Etype] != 0) {
+ igen(n.Left, a, res)
+ } else {
+ igen(n.Left, &n1, res)
+ gc.Cgen_checknil(&n1)
+ regalloc(a, gc.Types[gc.Tptr], res)
+ gmove(&n1, a)
+ regfree(&n1)
+ a.Op = gc.OINDREG
+ }
+
+ // Compute &a[i] as &a + i*width.
+ a.Type = n.Type
+
+ a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
+ fixlargeoffset(a)
+ return
+ }
+ }
+ }
+
+ agenr(n, a, res)
+ a.Op = gc.OINDREG
+ a.Type = n.Type
+}
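
The constant-index case in igen never materializes the index in a register; it folds i*width into the node's Xoffset so the final operand is a plain base+offset addressing mode. A sketch of the arithmetic (function name hypothetical):

	// elemOffset mirrors igen's folding for &a[i] with constant i:
	// the element address is the base offset plus i times the element width.
	func elemOffset(xoffset, i, width int64) int64 {
		return xoffset + i*width
	}
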
+
+/*
+ * generate:
+ * if(n == true) goto to;
+ */
+func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
+ var et int
+ var a int
+ var nl *gc.Node
+ var nr *gc.Node
+ var l *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var tmp gc.Node
+ var ll *gc.NodeList
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nbgen", n)
+ }
+
+ if n == nil {
+ n = gc.Nodbool(1)
+ }
+
+ if n.Ninit != nil {
+ gc.Genlist(n.Ninit)
+ }
+
+ if n.Type == nil {
+ gc.Convlit(&n, gc.Types[gc.TBOOL])
+ if n.Type == nil {
+ goto ret
+ }
+ }
+
+ et = int(n.Type.Etype)
+ if et != gc.TBOOL {
+ gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
+ gc.Patch(gins(obj.AEND, nil, nil), to)
+ goto ret
+ }
+
+ nr = nil
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ if n.Ninit != nil {
+ gc.Genlist(n.Ninit)
+ }
+ }
+
+ switch n.Op {
+ default:
+ regalloc(&n1, n.Type, nil)
+ cgen(n, &n1)
+ gc.Nodconst(&n2, n.Type, 0)
+ gins(optoas(gc.OCMP, n.Type), &n1, &n2)
+ a = ppc64.ABNE
+ if !true_ {
+ a = ppc64.ABEQ
+ }
+ gc.Patch(gc.Gbranch(a, n.Type, likely), to)
+ regfree(&n1)
+ goto ret
+
+ // need to ask if it is bool?
+ case gc.OLITERAL:
+ if !true_ == !(n.Val.U.Bval != 0) {
+ gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to)
+ }
+ goto ret
+
+ case gc.OANDAND,
+ gc.OOROR:
+ if (n.Op == gc.OANDAND) == true_ {
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n.Left, !true_, -likely, p2)
+ bgen(n.Right, !true_, -likely, p2)
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, to)
+ gc.Patch(p2, gc.Pc)
+ } else {
+ bgen(n.Left, true_, likely, to)
+ bgen(n.Right, true_, likely, to)
+ }
+
+ goto ret
+
+ case gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OGT,
+ gc.OLE,
+ gc.OGE:
+ nr = n.Right
+ if nr == nil || nr.Type == nil {
+ goto ret
+ }
+ fallthrough
+
+ case gc.ONOT: // unary
+ nl = n.Left
+
+ if nl == nil || nl.Type == nil {
+ goto ret
+ }
+ }
+
+ switch n.Op {
+ case gc.ONOT:
+ bgen(nl, !true_, likely, to)
+ goto ret
+
+ case gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OGT,
+ gc.OLE,
+ gc.OGE:
+ a = int(n.Op)
+ if !true_ {
+ if gc.Isfloat[nr.Type.Etype] != 0 {
+ // brcom is not valid on floats when NaN is involved.
+ p1 = gc.Gbranch(ppc64.ABR, nil, 0)
+
+ p2 = gc.Gbranch(ppc64.ABR, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ ll = n.Ninit // avoid re-genning ninit
+ n.Ninit = nil
+ bgen(n, true, -likely, p2)
+ n.Ninit = ll
+ gc.Patch(gc.Gbranch(ppc64.ABR, nil, 0), to)
+ gc.Patch(p2, gc.Pc)
+ goto ret
+ }
+
+ a = gc.Brcom(a)
+ true_ = !true_
+ }
+
+ // make simplest on right
+ if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
+ a = gc.Brrev(a)
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+ if gc.Isslice(nl.Type) != 0 {
+ // front end should only leave cmp to literal nil
+ if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+ gc.Yyerror("illegal slice comparison")
+ break
+ }
+
+ a = optoas(a, gc.Types[gc.Tptr])
+ igen(nl, &n1, nil)
+ n1.Xoffset += int64(gc.Array_array)
+ n1.Type = gc.Types[gc.Tptr]
+ gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+ regalloc(&n2, gc.Types[gc.Tptr], &n1)
+ gmove(&n1, &n2)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
+ regfree(&n2)
+ gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isinter(nl.Type) != 0 {
+ // front end should only leave cmp to literal nil
+ if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+ gc.Yyerror("illegal interface comparison")
+ break
+ }
+
+ a = optoas(a, gc.Types[gc.Tptr])
+ igen(nl, &n1, nil)
+ n1.Type = gc.Types[gc.Tptr]
+ gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+ regalloc(&n2, gc.Types[gc.Tptr], &n1)
+ gmove(&n1, &n2)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
+ regfree(&n2)
+ gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Iscomplex[nl.Type.Etype] != 0 {
+ gc.Complexbool(a, nl, nr, true_, likely, to)
+ break
+ }
+
+ if nr.Ullman >= gc.UINF {
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+
+ gc.Tempname(&tmp, nl.Type)
+ gmove(&n1, &tmp)
+ regfree(&n1)
+
+ regalloc(&n2, nr.Type, nil)
+ cgen(nr, &n2)
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(&tmp, &n1)
+
+ goto cmp
+ }
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+
+ // TODO(minux): cmpi does accept 16-bit signed immediate as p->to.
+ // and cmpli accepts 16-bit unsigned immediate.
+ //if(smallintconst(nr)) {
+ // gins(optoas(OCMP, nr->type), &n1, nr);
+ // patch(gbranch(optoas(a, nr->type), nr->type, likely), to);
+ // regfree(&n1);
+ // break;
+ //}
+
+ regalloc(&n2, nr.Type, nil)
+
+ cgen(nr, &n2)
+
+ cmp:
+ l = &n1
+ r = &n2
+ gins(optoas(gc.OCMP, nr.Type), l, r)
+ if gc.Isfloat[nr.Type.Etype] != 0 && (a == gc.OLE || a == gc.OGE) {
+ // To get NaN right, must rewrite x <= y into separate x < y or x = y.
+ switch a {
+ case gc.OLE:
+ a = gc.OLT
+
+ case gc.OGE:
+ a = gc.OGT
+ }
+
+ gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+ gc.Patch(gc.Gbranch(optoas(gc.OEQ, nr.Type), nr.Type, likely), to)
+ } else {
+ gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+ }
+
+ regfree(&n1)
+ regfree(&n2)
+ }
+
+ goto ret
+
+ret:
+}
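
Two of the float paths in bgen exist only because of NaN: Brcom-style inversion is unsound (!(x < y) is not x >= y when a NaN is involved), and x <= y must be split into two branches. A sketch of the semantics being preserved (illustrative only, not part of this CL):

	// With a NaN operand every ordered comparison is false, so the
	// NaN-correct encoding of <= is "branch if less, also branch if
	// equal": exactly the two Gbranch calls in the OLE/OGE case above.
	func lessEqNaNSafe(x, y float64) bool {
		return x < y || x == y // false whenever x or y is NaN
	}
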
+
+/*
+ * n is on stack, either local variable
+ * or return value from function call.
+ * return n's offset from SP.
+ */
+func stkof(n *gc.Node) int64 {
+ var t *gc.Type
+ var flist gc.Iter
+ var off int64
+
+ switch n.Op {
+ case gc.OINDREG:
+ return n.Xoffset
+
+ case gc.ODOT:
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ break
+ }
+ off = stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ return off + n.Xoffset
+
+ case gc.OINDEX:
+ t = n.Left.Type
+ if !(gc.Isfixedarray(t) != 0) {
+ break
+ }
+ off = stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
+ }
+ return 1000
+
+ case gc.OCALLMETH,
+ gc.OCALLINTER,
+ gc.OCALLFUNC:
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+
+ t = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if t != nil {
+ return t.Width + int64(gc.Widthptr) // +widthptr: correct for saved LR
+ }
+ }
+
+ // botch - probably failing to recognize address
+ // arithmetic on the above. eg INDEX and DOT
+ return -1000
+}
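
stkof overloads its return value: either an actual offset from SP, or one of two sentinels, -1000 for "not stack-resident" and +1000 for "stack-resident at an offset not known at compile time" (e.g. a variable array index). Named constants make the convention explicit (names hypothetical, sketch only):

	const (
		stkNotStack = -1000 // not on the stack at all
		stkUnknown  = 1000  // on the stack, offset unknown at compile time
	)
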
+
+/*
+ * block copy:
+ * memmove(&ns, &n, w);
+ */
+func sgen(n *gc.Node, ns *gc.Node, w int64) {
+ var dst gc.Node
+ var src gc.Node
+ var tmp gc.Node
+ var nend gc.Node
+ var c int32
+ var odst int32
+ var osrc int32
+ var dir int
+ var align int
+ var op int
+ var p *obj.Prog
+ var ploop *obj.Prog
+ var l *gc.NodeList
+ var res *gc.Node = ns
+
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("\nsgen w=%d\n", w)
+ gc.Dump("r", n)
+ gc.Dump("res", ns)
+ }
+
+ if n.Ullman >= gc.UINF && ns.Ullman >= gc.UINF {
+ gc.Fatal("sgen UINF")
+ }
+
+ if w < 0 {
+ gc.Fatal("sgen copy %d", w)
+ }
+
+ // If copying .args, that's all the results, so record definition sites
+ // for them for the liveness analysis.
+ if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
+ for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ if l.N.Class == gc.PPARAMOUT {
+ gc.Gvardef(l.N)
+ }
+ }
+ }
+
+ // Avoid taking the address for simple enough types.
+ //if(componentgen(n, ns))
+ // return;
+ if w == 0 {
+ // evaluate side effects only.
+ regalloc(&dst, gc.Types[gc.Tptr], nil)
+
+ agen(res, &dst)
+ agen(n, &dst)
+ regfree(&dst)
+ return
+ }
+
+ // determine alignment.
+ // want to avoid unaligned access, so have to use
+ // smaller operations for less aligned types.
+ // for example moving [4]byte must use 4 MOVB not 1 MOVW.
+ align = int(n.Type.Align)
+
+ switch align {
+ default:
+ gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
+ fallthrough
+
+ case 1:
+ op = ppc64.AMOVBU
+
+ case 2:
+ op = ppc64.AMOVHU
+
+ case 4:
+ op = ppc64.AMOVWZU // there is no lwau, only lwaux
+
+ case 8:
+ op = ppc64.AMOVDU
+ }
+
+ if w%int64(align) != 0 {
+ gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
+ }
+ c = int32(w / int64(align))
+
+ // offset on the stack
+ osrc = int32(stkof(n))
+
+ odst = int32(stkof(res))
+ if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
+ // osrc and odst both on stack, and at least one is in
+ // an unknown position. Could generate code to test
+ // for forward/backward copy, but instead just copy
+ // to a temporary location first.
+ gc.Tempname(&tmp, n.Type)
+
+ sgen(n, &tmp, w)
+ sgen(&tmp, res, w)
+ return
+ }
+
+ if osrc%int32(align) != 0 || odst%int32(align) != 0 {
+ gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
+ }
+
+ // if we are copying forward on the stack and
+ // the src and dst overlap, then reverse direction
+ dir = align
+
+ if osrc < odst && int64(odst) < int64(osrc)+w {
+ dir = -dir
+ }
+
+ if n.Ullman >= res.Ullman {
+ agenr(n, &dst, res) // temporarily use dst
+ regalloc(&src, gc.Types[gc.Tptr], nil)
+ gins(ppc64.AMOVD, &dst, &src)
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ agen(res, &dst)
+ } else {
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ agenr(res, &dst, res)
+ agenr(n, &src, nil)
+ }
+
+ regalloc(&tmp, gc.Types[gc.Tptr], nil)
+
+ // set up end marker
+ nend = gc.Node{}
+
+ // move src and dest to the end of block if necessary
+ if dir < 0 {
+ if c >= 4 {
+ regalloc(&nend, gc.Types[gc.Tptr], nil)
+ p = gins(ppc64.AMOVD, &src, &nend)
+ }
+
+ p = gins(ppc64.AADD, nil, &src)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = w
+
+ p = gins(ppc64.AADD, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = w
+ } else {
+ p = gins(ppc64.AADD, nil, &src)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(-dir)
+
+ p = gins(ppc64.AADD, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(-dir)
+
+ if c >= 4 {
+ regalloc(&nend, gc.Types[gc.Tptr], nil)
+ p = gins(ppc64.AMOVD, &src, &nend)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = w
+ }
+ }
+
+ // move
+ // TODO: enable duffcopy for larger copies.
+ if c >= 4 {
+ p = gins(op, &src, &tmp)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = int64(dir)
+ ploop = p
+
+ p = gins(op, &tmp, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(dir)
+
+ p = gins(ppc64.ACMP, &src, &nend)
+
+ gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
+ regfree(&nend)
+ } else {
+ // TODO(austin): Instead of generating ADD $-8,R8; ADD
+ // $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
+ // generate the offsets directly and eliminate the
+ // ADDs. That will produce shorter, more
+ // pipeline-able code.
+		for ; c > 0; c-- {
+
+ p = gins(op, &src, &tmp)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = int64(dir)
+
+ p = gins(op, &tmp, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(dir)
+ }
+ }
+
+ regfree(&dst)
+ regfree(&src)
+ regfree(&tmp)
+}
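
The direction flip in sgen is the classic memmove test: a forward copy clobbers its own source exactly when the destination starts inside the source range. Sketch (illustrative only):

	// copyBackward reports whether a forward stack-to-stack copy of w
	// bytes would overwrite not-yet-copied source bytes, forcing the
	// dir = -dir reversal above.
	func copyBackward(osrc, odst, w int64) bool {
		return osrc < odst && odst < osrc+w
	}
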
+
+func cadable(n *gc.Node) int {
+ if !(n.Addable != 0) {
+		// don't know how it happens,
+		// but it does
+ return 0
+ }
+
+ switch n.Op {
+ case gc.ONAME:
+ return 1
+ }
+
+ return 0
+}
+
+/*
+ * copy a composite value by moving its individual components.
+ * Slices, strings and interfaces are supported.
+ * Small structs or arrays with elements of basic type are
+ * also supported.
+ * nr is nil when assigning a zero value.
+ * return 1 if can do, 0 if can't.
+ */
+func componentgen(nr *gc.Node, nl *gc.Node) int {
+ var nodl gc.Node
+ var nodr gc.Node
+ var tmp gc.Node
+ var t *gc.Type
+ var freel int
+ var freer int
+ var fldcount int64
+ var loffset int64
+ var roffset int64
+
+ freel = 0
+ freer = 0
+
+ switch nl.Type.Etype {
+ default:
+ goto no
+
+ case gc.TARRAY:
+ t = nl.Type
+
+ // Slices are ok.
+ if gc.Isslice(t) != 0 {
+ break
+ }
+
+ // Small arrays are ok.
+ if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+ break
+ }
+
+ goto no
+
+ // Small structs with non-fat types are ok.
+ // Zero-sized structs are treated separately elsewhere.
+ case gc.TSTRUCT:
+ fldcount = 0
+
+ for t = nl.Type.Type; t != nil; t = t.Down {
+ if gc.Isfat(t.Type) != 0 {
+ goto no
+ }
+ if t.Etype != gc.TFIELD {
+ gc.Fatal("componentgen: not a TFIELD: %v", gc.Tconv(t, obj.FmtLong))
+ }
+ fldcount++
+ }
+
+ if fldcount == 0 || fldcount > 4 {
+ goto no
+ }
+
+ case gc.TSTRING,
+ gc.TINTER:
+ break
+ }
+
+ nodl = *nl
+ if !(cadable(nl) != 0) {
+ if nr != nil && !(cadable(nr) != 0) {
+ goto no
+ }
+ igen(nl, &nodl, nil)
+ freel = 1
+ }
+
+ if nr != nil {
+ nodr = *nr
+ if !(cadable(nr) != 0) {
+ igen(nr, &nodr, nil)
+ freer = 1
+ }
+ } else {
+ // When zeroing, prepare a register containing zero.
+ gc.Nodconst(&tmp, nl.Type, 0)
+
+ regalloc(&nodr, gc.Types[gc.TUINT], nil)
+ gmove(&tmp, &nodr)
+ freer = 1
+ }
+
+ // nl and nr are 'cadable' which basically means they are names (variables) now.
+ // If they are the same variable, don't generate any code, because the
+ // VARDEF we generate will mark the old value as dead incorrectly.
+ // (And also the assignments are useless.)
+ if nr != nil && nl.Op == gc.ONAME && nr.Op == gc.ONAME && nl == nr {
+ goto yes
+ }
+
+ switch nl.Type.Etype {
+ // componentgen for arrays.
+ case gc.TARRAY:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ t = nl.Type
+ if !(gc.Isslice(t) != 0) {
+ nodl.Type = t.Type
+ nodr.Type = nodl.Type
+ for fldcount = 0; fldcount < t.Bound; fldcount++ {
+ if nr == nil {
+ gc.Clearslim(&nodl)
+ } else {
+ gmove(&nodr, &nodl)
+ }
+ nodl.Xoffset += t.Type.Width
+ nodr.Xoffset += t.Type.Width
+ }
+
+ goto yes
+ }
+
+ // componentgen for slices.
+ nodl.Xoffset += int64(gc.Array_array)
+
+ nodl.Type = gc.Ptrto(nl.Type.Type)
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TSTRING:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ nodl.Xoffset += int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TINTER:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ nodl.Xoffset += int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TSTRUCT:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ loffset = nodl.Xoffset
+ roffset = nodr.Xoffset
+
+ // funarg structs may not begin at offset zero.
+ if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
+ loffset -= nl.Type.Type.Width
+ }
+ if nr != nil && nr.Type.Etype == gc.TSTRUCT && nr.Type.Funarg != 0 && nr.Type.Type != nil {
+ roffset -= nr.Type.Type.Width
+ }
+
+ for t = nl.Type.Type; t != nil; t = t.Down {
+ nodl.Xoffset = loffset + t.Width
+ nodl.Type = t.Type
+
+ if nr == nil {
+ gc.Clearslim(&nodl)
+ } else {
+ nodr.Xoffset = roffset + t.Width
+ nodr.Type = nodl.Type
+ gmove(&nodr, &nodl)
+ }
+ }
+
+ goto yes
+ }
+
+no:
+ if freer != 0 {
+ regfree(&nodr)
+ }
+ if freel != 0 {
+ regfree(&nodl)
+ }
+ return 0
+
+yes:
+ if freer != 0 {
+ regfree(&nodr)
+ }
+ if freel != 0 {
+ regfree(&nodl)
+ }
+ return 1
+}
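
componentgen copies slices, strings, and interfaces word by word using the Array_array/Array_nel/Array_cap offsets. The headers it is walking correspond to the usual runtime layouts, roughly as below (a sketch assuming the standard 64-bit layout; field names are illustrative, not part of this CL):

	import "unsafe"

	type sliceHeader struct {
		data unsafe.Pointer // Array_array
		len  int            // Array_nel
		cap  int            // Array_cap
	}

	type stringHeader struct {
		data unsafe.Pointer // Array_array
		len  int            // Array_nel
	}

	type ifaceHeader struct {
		tab  unsafe.Pointer // type/itab word, copied first above
		data unsafe.Pointer // data word, copied second
	}
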
diff --git a/src/cmd/new9g/galign.go b/src/cmd/new9g/galign.go
new file mode 100644
index 0000000000..a3ce760e0e
--- /dev/null
+++ b/src/cmd/new9g/galign.go
@@ -0,0 +1,92 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
+import "cmd/internal/gc"
+
+var thechar int = '9'
+
+var thestring string = "ppc64"
+
+var thelinkarch *obj.LinkArch
+
+func linkarchinit() {
+ thestring = obj.Getgoarch()
+ gc.Thearch.Thestring = thestring
+ if thestring == "ppc64le" {
+ thelinkarch = &ppc64.Linkppc64le
+ } else {
+ thelinkarch = &ppc64.Linkppc64
+ }
+ gc.Thearch.Thelinkarch = thelinkarch
+}
+
+var MAXWIDTH int64 = 1 << 50
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, float, and uintptr
+ */
+var typedefs = []gc.Typedef{
+ gc.Typedef{"int", gc.TINT, gc.TINT64},
+ gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
+ gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
+}
+
+func betypeinit() {
+ gc.Widthptr = 8
+ gc.Widthint = 8
+ gc.Widthreg = 8
+
+}
+
+func main() {
+ gc.Thearch.Thechar = thechar
+ gc.Thearch.Thestring = thestring
+ gc.Thearch.Thelinkarch = thelinkarch
+ gc.Thearch.Typedefs = typedefs
+ gc.Thearch.REGSP = ppc64.REGSP
+ gc.Thearch.REGCTXT = ppc64.REGCTXT
+ gc.Thearch.MAXWIDTH = MAXWIDTH
+ gc.Thearch.Anyregalloc = anyregalloc
+ gc.Thearch.Betypeinit = betypeinit
+ gc.Thearch.Bgen = bgen
+ gc.Thearch.Cgen = cgen
+ gc.Thearch.Cgen_call = cgen_call
+ gc.Thearch.Cgen_callinter = cgen_callinter
+ gc.Thearch.Cgen_ret = cgen_ret
+ gc.Thearch.Clearfat = clearfat
+ gc.Thearch.Defframe = defframe
+ gc.Thearch.Excise = excise
+ gc.Thearch.Expandchecks = expandchecks
+ gc.Thearch.Gclean = gclean
+ gc.Thearch.Ginit = ginit
+ gc.Thearch.Gins = gins
+ gc.Thearch.Ginscall = ginscall
+ gc.Thearch.Igen = igen
+ gc.Thearch.Linkarchinit = linkarchinit
+ gc.Thearch.Peep = peep
+ gc.Thearch.Proginfo = proginfo
+ gc.Thearch.Regalloc = regalloc
+ gc.Thearch.Regfree = regfree
+ gc.Thearch.Regtyp = regtyp
+ gc.Thearch.Sameaddr = sameaddr
+ gc.Thearch.Smallindir = smallindir
+ gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Excludedregs = excludedregs
+ gc.Thearch.RtoB = RtoB
+ gc.Thearch.FtoB = RtoB
+ gc.Thearch.BtoR = BtoR
+ gc.Thearch.BtoF = BtoF
+ gc.Thearch.Optoas = optoas
+ gc.Thearch.Doregbits = doregbits
+ gc.Thearch.Regnames = regnames
+
+ gc.Main()
+}
diff --git a/src/cmd/new9g/gg.go b/src/cmd/new9g/gg.go
new file mode 100644
index 0000000000..068d8afe53
--- /dev/null
+++ b/src/cmd/new9g/gg.go
@@ -0,0 +1,28 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "cmd/internal/obj/ppc64"
+import "cmd/internal/gc"
+
+var reg [ppc64.NREG + ppc64.NFREG]uint8
+
+var panicdiv *gc.Node
+
+/*
+ * cgen.c
+ */
+
+/*
+ * list.c
+ */
+
+/*
+ * reg.c
+ */
diff --git a/src/cmd/new9g/ggen.go b/src/cmd/new9g/ggen.go
new file mode 100644
index 0000000000..1b335abdd2
--- /dev/null
+++ b/src/cmd/new9g/ggen.go
@@ -0,0 +1,1060 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+func defframe(ptxt *obj.Prog) {
+ var frame uint32
+ var p *obj.Prog
+ var hi int64
+ var lo int64
+ var l *gc.NodeList
+ var n *gc.Node
+
+ // fill in argument size, stack size
+ ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+ ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+ frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+ ptxt.To.Offset = int64(frame)
+
+ // insert code to zero ambiguously live variables
+ // so that the garbage collector only sees initialized values
+ // when it looks for pointers.
+ p = ptxt
+
+ hi = 0
+ lo = hi
+
+ // iterate through declarations - they are sorted in decreasing xoffset order.
+ for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if !(n.Needzero != 0) {
+ continue
+ }
+ if n.Class != gc.PAUTO {
+ gc.Fatal("needzero class %d", n.Class)
+ }
+ if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+ gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ }
+
+ if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
+ // merge with range we already have
+ lo = n.Xoffset
+
+ continue
+ }
+
+ // zero old range
+ p = zerorange(p, int64(frame), lo, hi)
+
+ // set new range
+ hi = n.Xoffset + n.Type.Width
+
+ lo = n.Xoffset
+ }
+
+ // zero final range
+ zerorange(p, int64(frame), lo, hi)
+}
+
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
+ var cnt int64
+ var i int64
+ var p1 *obj.Prog
+ var f *gc.Node
+
+ cnt = hi - lo
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*gc.Widthptr) {
+ for i = 0; i < cnt; i += int64(gc.Widthptr) {
+ p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
+ }
+ } else if cnt <= int64(128*gc.Widthptr) {
+ p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p.Reg = ppc64.REGSP
+ p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ f = gc.Sysfunc("duffzero")
+ gc.Naddr(f, &p.To, 1)
+ gc.Afunclit(&p.To, f)
+ p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+ } else {
+ p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p.Reg = ppc64.REGSP
+ p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p.Reg = ppc64.REGRT1
+ p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
+ p1 = p
+ p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ gc.Patch(p, p1)
+ }
+
+ return p
+}
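
The middle path of zerorange enters duffzero partway through rather than at the top. The ppc64 duffzero body (see ../../runtime/asm_ppc64x.s, also referenced from clearfat below) is 128 MOVDU instructions of 4 bytes each, so zeroing n doublewords means skipping the first 128-n entries. A sketch of the offset computation, under those assumptions:

	// duffzeroEntry returns the byte offset of the duffzero entry
	// point that zeroes exactly cnt bytes, assuming 4-byte
	// instructions and a 128-instruction body.
	func duffzeroEntry(cnt, widthptr int64) int64 {
		return 4 * (128 - cnt/widthptr)
	}
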
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+ var q *obj.Prog
+ q = gc.Ctxt.NewProg()
+ gc.Clearp(q)
+ q.As = int16(as)
+ q.Lineno = p.Lineno
+ q.From.Type = int16(ftype)
+ q.From.Reg = int16(freg)
+ q.From.Offset = foffset
+ q.To.Type = int16(ttype)
+ q.To.Reg = int16(treg)
+ q.To.Offset = toffset
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
+
+/*
+ * generate: BL reg, f
+ * where both reg and f are registers.
+ * On power, f must be moved to CTR first.
+ */
+func ginsBL(reg *gc.Node, f *gc.Node) {
+ var p *obj.Prog
+ p = gins(ppc64.AMOVD, f, nil)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+ p = gins(ppc64.ABL, reg, nil)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+}
+
+/*
+ * generate:
+ * call f
+ * proc=-1 normal call but no return
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ * proc=3 normal call to C pointer (not Go func value)
+ */
+func ginscall(f *gc.Node, proc int) {
+ var p *obj.Prog
+ var reg gc.Node
+ var con gc.Node
+ var reg2 gc.Node
+ var r1 gc.Node
+ var extra int32
+
+ if f.Type != nil {
+ extra = 0
+ if proc == 1 || proc == 2 {
+ extra = 2 * int32(gc.Widthptr)
+ }
+ gc.Setmaxarg(f.Type, extra)
+ }
+
+ switch proc {
+ default:
+ gc.Fatal("ginscall: bad proc %d", proc)
+
+ case 0, // normal call
+ -1: // normal call but no return
+ if f.Op == gc.ONAME && f.Class == gc.PFUNC {
+ if f == gc.Deferreturn {
+ // Deferred calls will appear to be returning to
+ // the CALL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction byte before the return PC.
+ // To avoid that being an unrelated instruction,
+			// insert a ppc64 NOP so that we will have the right line number.
+ // The ppc64 NOP is really or r0, r0, r0; use that description
+ // because the NOP pseudo-instruction would be removed by
+ // the linker.
+ gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
+
+ gins(ppc64.AOR, &reg, &reg)
+ }
+
+ p = gins(ppc64.ABL, nil, f)
+ gc.Afunclit(&p.To, f)
+ if proc == -1 || gc.Noreturn(p) != 0 {
+ gins(obj.AUNDEF, nil, nil)
+ }
+ break
+ }
+
+ gc.Nodreg(&reg, gc.Types[gc.Tptr], ppc64.REGCTXT)
+ gc.Nodreg(&r1, gc.Types[gc.Tptr], ppc64.REG_R3)
+ gmove(f, &reg)
+ reg.Op = gc.OINDREG
+ gmove(&reg, &r1)
+ reg.Op = gc.OREGISTER
+ ginsBL(&reg, &r1)
+
+ case 3: // normal call of c function pointer
+ ginsBL(nil, f)
+
+ case 1, // call in new proc (go)
+ 2: // deferred call (defer)
+ gc.Nodconst(&con, gc.Types[gc.TINT64], int64(gc.Argsize(f.Type)))
+
+ gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
+ gc.Nodreg(&reg2, gc.Types[gc.TINT64], ppc64.REG_R4)
+ gmove(f, &reg)
+
+ gmove(&con, &reg2)
+ p = gins(ppc64.AMOVW, &reg2, nil)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = ppc64.REGSP
+ p.To.Offset = 8
+
+ p = gins(ppc64.AMOVD, &reg, nil)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = ppc64.REGSP
+ p.To.Offset = 16
+
+ if proc == 1 {
+ ginscall(gc.Newproc, 0)
+ } else {
+ if !(gc.Hasdefer != 0) {
+ gc.Fatal("hasdefer=0 but has defer")
+ }
+ ginscall(gc.Deferproc, 0)
+ }
+
+ if proc == 2 {
+ gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
+ p = gins(ppc64.ACMP, &reg, nil)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R0
+ p = gc.Gbranch(ppc64.ABEQ, nil, +1)
+ cgen_ret(nil)
+ gc.Patch(p, gc.Pc)
+ }
+ }
+}
+
+/*
+ * n is call to interface method.
+ * generate res = n.
+ */
+func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
+ var i *gc.Node
+ var f *gc.Node
+ var tmpi gc.Node
+ var nodi gc.Node
+ var nodo gc.Node
+ var nodr gc.Node
+ var nodsp gc.Node
+ var p *obj.Prog
+
+ i = n.Left
+ if i.Op != gc.ODOTINTER {
+ gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
+ }
+
+ f = i.Right // field
+ if f.Op != gc.ONAME {
+ gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
+ }
+
+ i = i.Left // interface
+
+ if !(i.Addable != 0) {
+ gc.Tempname(&tmpi, i.Type)
+ cgen(i, &tmpi)
+ i = &tmpi
+ }
+
+ gc.Genlist(n.List) // assign the args
+
+ // i is now addable, prepare an indirected
+ // register to hold its address.
+ igen(i, &nodi, res) // REG = &inter
+
+ gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], ppc64.REGSP)
+
+ nodsp.Xoffset = int64(gc.Widthptr)
+ if proc != 0 {
+ nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
+ }
+ nodi.Type = gc.Types[gc.Tptr]
+ nodi.Xoffset += int64(gc.Widthptr)
+ cgen(&nodi, &nodsp) // {8 or 24}(SP) = 8(REG) -- i.data
+
+ regalloc(&nodo, gc.Types[gc.Tptr], res)
+
+ nodi.Type = gc.Types[gc.Tptr]
+ nodi.Xoffset -= int64(gc.Widthptr)
+ cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
+ regfree(&nodi)
+
+ regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
+ if n.Left.Xoffset == gc.BADWIDTH {
+ gc.Fatal("cgen_callinter: badwidth")
+ }
+ gc.Cgen_checknil(&nodo) // in case offset is huge
+ nodo.Op = gc.OINDREG
+ nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
+ if proc == 0 {
+ // plain call: use direct c function pointer - more efficient
+ cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
+ proc = 3
+ } else {
+ // go/defer. generate go func value.
+ p = gins(ppc64.AMOVD, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
+ p.From.Type = obj.TYPE_ADDR
+ }
+
+ nodr.Type = n.Left.Type
+ ginscall(&nodr, proc)
+
+ regfree(&nodr)
+ regfree(&nodo)
+}
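
The magic constant in nodo.Xoffset = n.Left.Xoffset + 3*Widthptr + 8 is the offset of the method table within the runtime's itab: three pointer fields plus two int32s, i.e. 32 bytes on ppc64. A sketch of the layout this arithmetic assumes (field names illustrative, matching the runtime of this era as best understood; not part of this CL):

	import "unsafe"

	type itab struct {
		inter  unsafe.Pointer // *interfacetype
		typ    unsafe.Pointer // *_type
		link   unsafe.Pointer // *itab
		bad    int32
		unused int32
		fun    [1]uintptr // method pointers start at 3*Widthptr + 8
	}
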
+
+/*
+ * generate function call;
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ */
+func cgen_call(n *gc.Node, proc int) {
+ var t *gc.Type
+ var nod gc.Node
+ var afun gc.Node
+
+ if n == nil {
+ return
+ }
+
+ if n.Left.Ullman >= gc.UINF {
+ // if name involves a fn call
+ // precompute the address of the fn
+ gc.Tempname(&afun, gc.Types[gc.Tptr])
+
+ cgen(n.Left, &afun)
+ }
+
+ gc.Genlist(n.List) // assign the args
+ t = n.Left.Type
+
+ // call tempname pointer
+ if n.Left.Ullman >= gc.UINF {
+ regalloc(&nod, gc.Types[gc.Tptr], nil)
+ gc.Cgen_as(&nod, &afun)
+ nod.Type = t
+ ginscall(&nod, proc)
+ regfree(&nod)
+ return
+ }
+
+ // call pointer
+ if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+ regalloc(&nod, gc.Types[gc.Tptr], nil)
+ gc.Cgen_as(&nod, n.Left)
+ nod.Type = t
+ ginscall(&nod, proc)
+ regfree(&nod)
+ return
+ }
+
+ // call direct
+ n.Left.Method = 1
+
+ ginscall(n.Left, proc)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = return value from call.
+ */
+func cgen_callret(n *gc.Node, res *gc.Node) {
+ var nod gc.Node
+ var fp *gc.Type
+ var t *gc.Type
+ var flist gc.Iter
+
+ t = n.Left.Type
+ if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
+ t = t.Type
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if fp == nil {
+ gc.Fatal("cgen_callret: nil")
+ }
+
+ nod = gc.Node{}
+ nod.Op = gc.OINDREG
+ nod.Val.U.Reg = ppc64.REGSP
+ nod.Addable = 1
+
+ nod.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved LR at 0(R1)
+ nod.Type = fp.Type
+ gc.Cgen_as(res, &nod)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = &return value from call.
+ */
+func cgen_aret(n *gc.Node, res *gc.Node) {
+ var nod1 gc.Node
+ var nod2 gc.Node
+ var fp *gc.Type
+ var t *gc.Type
+ var flist gc.Iter
+
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if fp == nil {
+ gc.Fatal("cgen_aret: nil")
+ }
+
+ nod1 = gc.Node{}
+ nod1.Op = gc.OINDREG
+ nod1.Val.U.Reg = ppc64.REGSP
+ nod1.Addable = 1
+
+ nod1.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
+ nod1.Type = fp.Type
+
+ if res.Op != gc.OREGISTER {
+ regalloc(&nod2, gc.Types[gc.Tptr], res)
+ agen(&nod1, &nod2)
+ gins(ppc64.AMOVD, &nod2, res)
+ regfree(&nod2)
+ } else {
+ agen(&nod1, res)
+ }
+}
+
+/*
+ * generate return.
+ * n->left is assignments to return values.
+ */
+func cgen_ret(n *gc.Node) {
+ var p *obj.Prog
+
+ if n != nil {
+ gc.Genlist(n.List) // copy out args
+ }
+ if gc.Hasdefer != 0 {
+ ginscall(gc.Deferreturn, 0)
+ }
+ gc.Genlist(gc.Curfn.Exit)
+ p = gins(obj.ARET, nil, nil)
+ if n != nil && n.Op == gc.ORETJMP {
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Linksym(n.Left.Sym)
+ }
+}
+
+/*
+ * generate division.
+ * generates one of:
+ * res = nl / nr
+ * res = nl % nr
+ * according to op.
+ */
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var a int
+ var check int
+ var t *gc.Type
+ var t0 *gc.Type
+ var tl gc.Node
+ var tr gc.Node
+ var tl2 gc.Node
+ var tr2 gc.Node
+ var nm1 gc.Node
+ var nz gc.Node
+ var tm gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+	// Have to be careful about handling
+	// most negative int divided by -1 correctly.
+	// The hardware will generate an undefined result.
+	// Also need to explicitly trap on division by zero;
+	// the hardware will silently generate an undefined result.
+	// DIVW will leave an unpredictable result in the upper 32 bits,
+	// so always use DIVD/DIVDU.
+ t = nl.Type
+
+ t0 = t
+ check = 0
+ if gc.Issigned[t.Etype] != 0 {
+ check = 1
+ if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
+ check = 0
+ } else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+ check = 0
+ }
+ }
+
+ if t.Width < 8 {
+ if gc.Issigned[t.Etype] != 0 {
+ t = gc.Types[gc.TINT64]
+ } else {
+ t = gc.Types[gc.TUINT64]
+ }
+ check = 0
+ }
+
+ a = optoas(gc.ODIV, t)
+
+ regalloc(&tl, t0, nil)
+ regalloc(&tr, t0, nil)
+ if nl.Ullman >= nr.Ullman {
+ cgen(nl, &tl)
+ cgen(nr, &tr)
+ } else {
+ cgen(nr, &tr)
+ cgen(nl, &tl)
+ }
+
+ if t != t0 {
+ // Convert
+ tl2 = tl
+
+ tr2 = tr
+ tl.Type = t
+ tr.Type = t
+ gmove(&tl2, &tl)
+ gmove(&tr2, &tr)
+ }
+
+ // Handle divide-by-zero panic.
+ p1 = gins(optoas(gc.OCMP, t), &tr, nil)
+
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = ppc64.REGZERO
+ p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ if panicdiv == nil {
+ panicdiv = gc.Sysfunc("panicdivide")
+ }
+ ginscall(panicdiv, -1)
+ gc.Patch(p1, gc.Pc)
+
+ if check != 0 {
+ gc.Nodconst(&nm1, t, -1)
+ gins(optoas(gc.OCMP, t), &tr, &nm1)
+ p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ if op == gc.ODIV {
+ // a / (-1) is -a.
+ gins(optoas(gc.OMINUS, t), nil, &tl)
+
+ gmove(&tl, res)
+ } else {
+ // a % (-1) is 0.
+ gc.Nodconst(&nz, t, 0)
+
+ gmove(&nz, res)
+ }
+
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ p1 = gins(a, &tr, &tl)
+ if op == gc.ODIV {
+ regfree(&tr)
+ gmove(&tl, res)
+ } else {
+ // A%B = A-(A/B*B)
+ regalloc(&tm, t, nil)
+
+ // patch div to use the 3 register form
+ // TODO(minux): add gins3?
+ p1.Reg = p1.To.Reg
+
+ p1.To.Reg = tm.Val.U.Reg
+ gins(optoas(gc.OMUL, t), &tr, &tm)
+ regfree(&tr)
+ gins(optoas(gc.OSUB, t), &tm, &tl)
+ regfree(&tm)
+ gmove(&tl, res)
+ }
+
+ regfree(&tl)
+ if check != 0 {
+ gc.Patch(p2, gc.Pc)
+ }
+}
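
The two guards in dodiv pin down exactly the Go semantics the hardware does not give for free. In Go terms, the generated code behaves like the following sketch (illustrative only, not part of this CL):

	// div shows the checks dodiv emits around the raw DIVD: an
	// explicit divide-by-zero trap, and a special case so that
	// MinInt64 / -1 wraps to MinInt64 (and x % -1 is 0) instead of
	// producing an undefined hardware result.
	func div(a, b int64) int64 {
		if b == 0 {
			panic("integer divide by zero") // ginscall(panicdiv, -1)
		}
		if b == -1 {
			return -a // -MinInt64 wraps back to MinInt64
		}
		return a / b
	}
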
+
+/*
+ * generate division according to op, one of:
+ * res = nl / nr
+ * res = nl % nr
+ */
+func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var w int
+ var a int
+ var m gc.Magic
+
+ // TODO(minux): enable division by magic multiply (also need to fix longmod below)
+ //if(nr->op != OLITERAL)
+ goto longdiv
+
+ w = int(nl.Type.Width * 8)
+
+ // Front end handled 32-bit division. We only need to handle 64-bit.
+ // try to do division by multiply by (2^w)/d
+ // see hacker's delight chapter 10
+ switch gc.Simtype[nl.Type.Etype] {
+ default:
+ goto longdiv
+
+ case gc.TUINT64:
+ m.W = w
+ m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ gc.Umagic(&m)
+ if m.Bad != 0 {
+ break
+ }
+ if op == gc.OMOD {
+ goto longmod
+ }
+
+ cgenr(nl, &n1, nil)
+ gc.Nodconst(&n2, nl.Type, int64(m.Um))
+ regalloc(&n3, nl.Type, res)
+ cgen_hmul(&n1, &n2, &n3)
+
+ if m.Ua != 0 {
+ // need to add numerator accounting for overflow
+ gins(optoas(gc.OADD, nl.Type), &n1, &n3)
+
+ gc.Nodconst(&n2, nl.Type, 1)
+ gins(optoas(gc.ORROTC, nl.Type), &n2, &n3)
+ gc.Nodconst(&n2, nl.Type, int64(m.S)-1)
+ gins(optoas(gc.ORSH, nl.Type), &n2, &n3)
+ } else {
+ gc.Nodconst(&n2, nl.Type, int64(m.S))
+ gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift dx
+ }
+
+ gmove(&n3, res)
+ regfree(&n1)
+ regfree(&n3)
+ return
+
+ case gc.TINT64:
+ m.W = w
+ m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
+ gc.Smagic(&m)
+ if m.Bad != 0 {
+ break
+ }
+ if op == gc.OMOD {
+ goto longmod
+ }
+
+ cgenr(nl, &n1, res)
+ gc.Nodconst(&n2, nl.Type, m.Sm)
+ regalloc(&n3, nl.Type, nil)
+ cgen_hmul(&n1, &n2, &n3)
+
+ if m.Sm < 0 {
+ // need to add numerator
+ gins(optoas(gc.OADD, nl.Type), &n1, &n3)
+ }
+
+ gc.Nodconst(&n2, nl.Type, int64(m.S))
+ gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift n3
+
+ gc.Nodconst(&n2, nl.Type, int64(w)-1)
+
+ gins(optoas(gc.ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
+ gins(optoas(gc.OSUB, nl.Type), &n1, &n3) // added
+
+ if m.Sd < 0 {
+ // this could probably be removed
+ // by factoring it into the multiplier
+ gins(optoas(gc.OMINUS, nl.Type), nil, &n3)
+ }
+
+ gmove(&n3, res)
+ regfree(&n1)
+ regfree(&n3)
+ return
+ }
+
+ goto longdiv
+
+ // division and mod using (slow) hardware instruction
+longdiv:
+ dodiv(op, nl, nr, res)
+
+ return
+
+ // mod using formula A%B = A-(A/B*B) but
+ // we know that there is a fast algorithm for A/B
+longmod:
+ regalloc(&n1, nl.Type, res)
+
+ cgen(nl, &n1)
+ regalloc(&n2, nl.Type, nil)
+ cgen_div(gc.ODIV, &n1, nr, &n2)
+ a = optoas(gc.OMUL, nl.Type)
+	if w == 8 {
+		// use 2-operand 16-bit multiply
+		// because there is no 2-operand 8-bit multiply
+		//a = AIMULW;
+	}
+ if !(gc.Smallintconst(nr) != 0) {
+ regalloc(&n3, nl.Type, nil)
+ cgen(nr, &n3)
+ gins(a, &n3, &n2)
+ regfree(&n3)
+ } else {
+ gins(a, nr, &n2)
+ }
+ gins(optoas(gc.OSUB, nl.Type), &n2, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ regfree(&n2)
+}
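
The magic-multiply path above is currently disabled by the unconditional goto longdiv, but the algorithm it implements (Hacker's Delight, ch. 10) replaces division by a constant with a high multiply and a shift. A minimal sketch of the unsigned, non-overflowing case (m.Ua == 0), where m and s stand for what gc.Umagic computes; math/bits is used purely for illustration and postdates this CL:

	import "math/bits"

	// divByConstU divides n by a fixed divisor using its precomputed
	// magic multiplier m and shift s: q = floor((m*n) / 2^64) >> s.
	func divByConstU(n, m uint64, s uint) uint64 {
		hi, _ := bits.Mul64(n, m) // high 64 bits of the 128-bit product
		return hi >> s
	}
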
+
+/*
+ * generate high multiply:
+ * res = (nl*nr) >> width
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var w int
+ var n1 gc.Node
+ var n2 gc.Node
+ var tmp *gc.Node
+ var t *gc.Type
+ var p *obj.Prog
+
+ // largest ullman on left.
+ if nl.Ullman < nr.Ullman {
+ tmp = nl
+ nl = nr
+ nr = tmp
+ }
+
+ t = nl.Type
+ w = int(t.Width * 8)
+ cgenr(nl, &n1, res)
+ cgenr(nr, &n2, nil)
+ switch gc.Simtype[t.Etype] {
+ case gc.TINT8,
+ gc.TINT16,
+ gc.TINT32:
+ gins(optoas(gc.OMUL, t), &n2, &n1)
+ p = gins(ppc64.ASRAD, nil, &n1)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(w)
+
+ case gc.TUINT8,
+ gc.TUINT16,
+ gc.TUINT32:
+ gins(optoas(gc.OMUL, t), &n2, &n1)
+ p = gins(ppc64.ASRD, nil, &n1)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(w)
+
+ case gc.TINT64,
+ gc.TUINT64:
+ if gc.Issigned[t.Etype] != 0 {
+ p = gins(ppc64.AMULHD, &n2, &n1)
+ } else {
+ p = gins(ppc64.AMULHDU, &n2, &n1)
+ }
+
+ default:
+ gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
+ }
+
+ cgen(&n1, res)
+ regfree(&n1)
+ regfree(&n2)
+}
+
+/*
+ * generate shift according to op, one of:
+ * res = nl << nr
+ * res = nl >> nr
+ */
+func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var n4 gc.Node
+ var n5 gc.Node
+ var a int
+ var p1 *obj.Prog
+ var sc uint64
+ var tcount *gc.Type
+
+ a = optoas(op, nl.Type)
+
+ if nr.Op == gc.OLITERAL {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+ sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ if sc >= uint64(nl.Type.Width*8) {
+ // large shift gets 2 shifts by width-1
+ gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+
+ gins(a, &n3, &n1)
+ gins(a, &n3, &n1)
+ } else {
+ gins(a, nr, &n1)
+ }
+ gmove(&n1, res)
+ regfree(&n1)
+ goto ret
+ }
+
+ if nl.Ullman >= gc.UINF {
+ gc.Tempname(&n4, nl.Type)
+ cgen(nl, &n4)
+ nl = &n4
+ }
+
+ if nr.Ullman >= gc.UINF {
+ gc.Tempname(&n5, nr.Type)
+ cgen(nr, &n5)
+ nr = &n5
+ }
+
+ // Allow either uint32 or uint64 as shift type,
+ // to avoid unnecessary conversion from uint32 to uint64
+ // just to do the comparison.
+ tcount = gc.Types[gc.Simtype[nr.Type.Etype]]
+
+ if tcount.Etype < gc.TUINT32 {
+ tcount = gc.Types[gc.TUINT32]
+ }
+
+ regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
+ regalloc(&n3, tcount, &n1) // to clear high bits of CX
+
+ regalloc(&n2, nl.Type, res)
+
+ if nl.Ullman >= nr.Ullman {
+ cgen(nl, &n2)
+ cgen(nr, &n1)
+ gmove(&n1, &n3)
+ } else {
+ cgen(nr, &n1)
+ gmove(&n1, &n3)
+ cgen(nl, &n2)
+ }
+
+ regfree(&n3)
+
+ // test and fix up large shifts
+ if !(bounded != 0) {
+ gc.Nodconst(&n3, tcount, nl.Type.Width*8)
+ gins(optoas(gc.OCMP, tcount), &n1, &n3)
+ p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
+ if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
+ gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+ gins(a, &n3, &n2)
+ } else {
+ gc.Nodconst(&n3, nl.Type, 0)
+ gmove(&n3, &n2)
+ }
+
+ gc.Patch(p1, gc.Pc)
+ }
+
+ gins(a, &n1, &n2)
+
+ gmove(&n2, res)
+
+ regfree(&n1)
+ regfree(&n2)
+
+ret:
+}
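
The fix-up block for !bounded is what makes Go shift semantics hold for variable counts: a count of at least the operand width must produce 0, or all sign bits for a signed right shift (hence the shift by width*8-1). Sketch of the signed case (illustrative only):

	// shr mirrors the large-count fix-up for a signed right shift.
	func shr(x int64, s uint64) int64 {
		if s >= 64 {
			return x >> 63 // all sign bits: the "width*8-1" shift above
		}
		return x >> s
	}
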
+
+func clearfat(nl *gc.Node) {
+ var w uint64
+ var c uint64
+ var q uint64
+ var t uint64
+ var boff uint64
+ var dst gc.Node
+ var end gc.Node
+ var r0 gc.Node
+ var f *gc.Node
+ var p *obj.Prog
+ var pl *obj.Prog
+
+ /* clear a fat object */
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("clearfat %v (%v, size: %d)\n", gc.Nconv(nl, 0), gc.Tconv(nl.Type, 0), nl.Type.Width)
+ }
+
+ w = uint64(nl.Type.Width)
+
+ // Avoid taking the address for simple enough types.
+ //if(componentgen(N, nl))
+ // return;
+
+ c = w % 8 // bytes
+ q = w / 8 // dwords
+
+ if reg[ppc64.REGRT1] > 0 {
+ gc.Fatal("R%d in use during clearfat", ppc64.REGRT1)
+ }
+
+ gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REG_R0) // r0 is always zero
+ gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
+ reg[ppc64.REGRT1]++
+ agen(nl, &dst)
+
+ if q > 128 {
+ p = gins(ppc64.ASUB, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 8
+
+ regalloc(&end, gc.Types[gc.Tptr], nil)
+ p = gins(ppc64.AMOVD, &dst, &end)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = int64(q * 8)
+
+ p = gins(ppc64.AMOVDU, &r0, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 8
+ pl = p
+
+ p = gins(ppc64.ACMP, &dst, &end)
+ gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
+
+ regfree(&end)
+
+ // The loop leaves R3 on the last zeroed dword
+ boff = 8
+ } else if q >= 4 {
+ p = gins(ppc64.ASUB, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 8
+ f = gc.Sysfunc("duffzero")
+ p = gins(obj.ADUFFZERO, nil, f)
+ gc.Afunclit(&p.To, f)
+
+ // 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
+ p.To.Offset = int64(4 * (128 - q))
+
+ // duffzero leaves R3 on the last zeroed dword
+ boff = 8
+ } else {
+ for t = 0; t < q; t++ {
+ p = gins(ppc64.AMOVD, &r0, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(8 * t)
+ }
+
+ boff = 8 * q
+ }
+
+ for t = 0; t < c; t++ {
+ p = gins(ppc64.AMOVB, &r0, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(t + boff)
+ }
+
+ reg[ppc64.REGRT1]--
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ for p = firstp; p != nil; p = p.Link {
+ if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
+ fmt.Printf("expandchecks: %v\n", p)
+ }
+ if p.As != obj.ACHECKNIL {
+ continue
+ }
+ if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+ gc.Warnl(int(p.Lineno), "generated nil check")
+ }
+ if p.From.Type != obj.TYPE_REG {
+ gc.Fatal("invalid nil check %v\n", p)
+ }
+
+ /*
+ // check is
+ // TD $4, R0, arg (R0 is always zero)
+ // eqv. to:
+ // tdeq r0, arg
+ // NOTE: this needs special runtime support to make SIGTRAP recoverable.
+ reg = p->from.reg;
+ p->as = ATD;
+ p->from = p->to = p->from3 = zprog.from;
+ p->from.type = TYPE_CONST;
+ p->from.offset = 4;
+ p->from.reg = 0;
+ p->reg = REG_R0;
+ p->to.type = TYPE_REG;
+ p->to.reg = reg;
+ */
+ // check is
+ // CMP arg, R0
+ // BNE 2(PC) [likely]
+ // MOVD R0, 0(R0)
+ p1 = gc.Ctxt.NewProg()
+
+ p2 = gc.Ctxt.NewProg()
+ gc.Clearp(p1)
+ gc.Clearp(p2)
+ p1.Link = p2
+ p2.Link = p.Link
+ p.Link = p1
+ p1.Lineno = p.Lineno
+ p2.Lineno = p.Lineno
+ p1.Pc = 9999
+ p2.Pc = 9999
+ p.As = ppc64.ACMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGZERO
+ p1.As = ppc64.ABNE
+
+ //p1->from.type = TYPE_CONST;
+ //p1->from.offset = 1; // likely
+ p1.To.Type = obj.TYPE_BRANCH
+
+ p1.To.U.Branch = p2.Link
+
+ // crash by write to memory address 0.
+ p2.As = ppc64.AMOVD
+
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = ppc64.REG_R0
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = ppc64.REG_R0
+ p2.To.Offset = 0
+ }
+}
diff --git a/src/cmd/new9g/gsubr.go b/src/cmd/new9g/gsubr.go
new file mode 100644
index 0000000000..932ae0febc
--- /dev/null
+++ b/src/cmd/new9g/gsubr.go
@@ -0,0 +1,1169 @@
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+// TODO(rsc): Can make this bigger if we move
+// the text segment up higher in 6l for all GOOS.
+// At the same time, can raise StackBig in ../../runtime/stack.h.
+var unmappedzero int64 = 4096
+
+var resvd = []int{
+ ppc64.REGZERO,
+ ppc64.REGSP, // reserved for SP
+ // We need to preserve the C ABI TLS pointer because sigtramp
+ // may happen during C code and needs to access the g. C
+ // clobbers REGG, so if Go were to clobber REGTLS, sigtramp
+ // won't know which convention to use. By preserving REGTLS,
+ // we can just retrieve g from TLS when we aren't sure.
+ ppc64.REGTLS,
+
+ // TODO(austin): Consolidate REGTLS and REGG?
+ ppc64.REGG,
+ ppc64.REGTMP, // REGTMP
+ ppc64.FREGCVI,
+ ppc64.FREGZERO,
+ ppc64.FREGHALF,
+ ppc64.FREGONE,
+ ppc64.FREGTWO,
+}
+
+func ginit() {
+ var i int
+
+ for i = 0; i < len(reg); i++ {
+ reg[i] = 1
+ }
+ for i = 0; i < ppc64.NREG+ppc64.NFREG; i++ {
+ reg[i] = 0
+ }
+
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]-ppc64.REG_R0]++
+ }
+}
+
+var regpc [len(reg)]uint32
+
+func gclean() {
+ var i int
+
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]-ppc64.REG_R0]--
+ }
+
+ for i = 0; i < len(reg); i++ {
+ if reg[i] != 0 {
+ gc.Yyerror("reg %v left allocated, %p\n", gc.Ctxt.Rconv(i+ppc64.REG_R0), regpc[i])
+ }
+ }
+}
+
+func anyregalloc() int {
+ var i int
+ var j int
+
+ for i = 0; i < len(reg); i++ {
+ if reg[i] == 0 {
+ goto ok
+ }
+ for j = 0; j < len(resvd); j++ {
+ if resvd[j] == i {
+ goto ok
+ }
+ }
+ return 1
+ ok:
+ }
+
+ return 0
+}
+
+/*
+ * allocate register of type t, leave in n.
+ * if o != N, o is desired fixed register.
+ * caller must regfree(n).
+ */
+func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
+ var i int
+ var et int
+ var fixfree int
+ var fltfree int
+
+ if t == nil {
+ gc.Fatal("regalloc: t nil")
+ }
+ et = int(gc.Simtype[t.Etype])
+
+ if gc.Debug['r'] != 0 {
+ fixfree = 0
+ fltfree = 0
+ for i = ppc64.REG_R0; i < ppc64.REG_F31; i++ {
+ if reg[i-ppc64.REG_R0] == 0 {
+ if i < ppc64.REG_F0 {
+ fixfree++
+ } else {
+ fltfree++
+ }
+ }
+ }
+
+ fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree)
+ }
+
+ switch et {
+ case gc.TINT8,
+ gc.TUINT8,
+ gc.TINT16,
+ gc.TUINT16,
+ gc.TINT32,
+ gc.TUINT32,
+ gc.TINT64,
+ gc.TUINT64,
+ gc.TPTR32,
+ gc.TPTR64,
+ gc.TBOOL:
+ if o != nil && o.Op == gc.OREGISTER {
+ i = int(o.Val.U.Reg)
+ if i >= ppc64.REGMIN && i <= ppc64.REGMAX {
+ goto out
+ }
+ }
+
+ for i = ppc64.REGMIN; i <= ppc64.REGMAX; i++ {
+ if reg[i-ppc64.REG_R0] == 0 {
+ regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n))
+ goto out
+ }
+ }
+
+ gc.Flusherrors()
+ for i = ppc64.REG_R0; i < ppc64.REG_R0+ppc64.NREG; i++ {
+ fmt.Printf("R%d %p\n", i, regpc[i-ppc64.REG_R0])
+ }
+ gc.Fatal("out of fixed registers")
+ fallthrough
+
+ case gc.TFLOAT32,
+ gc.TFLOAT64:
+ if o != nil && o.Op == gc.OREGISTER {
+ i = int(o.Val.U.Reg)
+ if i >= ppc64.FREGMIN && i <= ppc64.FREGMAX {
+ goto out
+ }
+ }
+
+ for i = ppc64.FREGMIN; i <= ppc64.FREGMAX; i++ {
+ if reg[i-ppc64.REG_R0] == 0 {
+ regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n))
+ goto out
+ }
+ }
+
+ gc.Flusherrors()
+ for i = ppc64.REG_F0; i < ppc64.REG_F0+ppc64.NREG; i++ {
+ fmt.Printf("F%d %p\n", i, regpc[i-ppc64.REG_R0])
+ }
+ gc.Fatal("out of floating registers")
+ fallthrough
+
+ case gc.TCOMPLEX64,
+ gc.TCOMPLEX128:
+ gc.Tempname(n, t)
+ return
+ }
+
+ gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
+ return
+
+out:
+ reg[i-ppc64.REG_R0]++
+ gc.Nodreg(n, t, i)
+}
+
+func regfree(n *gc.Node) {
+ var i int
+
+ if n.Op == gc.ONAME {
+ return
+ }
+ if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
+ gc.Fatal("regfree: not a register")
+ }
+ i = int(n.Val.U.Reg) - ppc64.REG_R0
+ if i == ppc64.REGSP-ppc64.REG_R0 {
+ return
+ }
+ if i < 0 || i >= len(reg) {
+ gc.Fatal("regfree: reg out of range")
+ }
+ if reg[i] <= 0 {
+ gc.Fatal("regfree: reg not allocated")
+ }
+ reg[i]--
+ if reg[i] == 0 {
+ regpc[i] = 0
+ }
+}
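
regalloc and regfree form a refcounted allocator over the reg array, with regpc remembering the allocating caller so gclean can report leaks. The contract is strictly paired, per the comment on regalloc above (usage sketch only):

	var n gc.Node
	regalloc(&n, gc.Types[gc.Tptr], nil) // pick a free register, refcount++
	// ... use &n as a gins/gmove operand ...
	regfree(&n) // refcount--; gclean flags any register left allocated
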
+
+/*
+ * generate
+ * as $c, n
+ */
+func ginscon(as int, c int64, n2 *gc.Node) {
+ var n1 gc.Node
+ var ntmp gc.Node
+
+ gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+ if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) {
+ // cannot have more than 16-bit of immediate in ADD, etc.
+ // instead, MOV into register first.
+ regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+ gins(ppc64.AMOVD, &n1, &ntmp)
+ gins(as, &ntmp, n2)
+ regfree(&ntmp)
+ return
+ }
+
+ gins(as, &n1, n2)
+}
+
+/*
+ * generate
+ * as n, $c (CMP/CMPU)
+ */
+func ginscon2(as int, n2 *gc.Node, c int64) {
+ var n1 gc.Node
+ var ntmp gc.Node
+
+ gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+ switch as {
+ default:
+ gc.Fatal("ginscon2")
+ fallthrough
+
+ case ppc64.ACMP:
+ if -ppc64.BIG <= c && c <= ppc64.BIG {
+ gins(as, n2, &n1)
+ return
+ }
+
+ case ppc64.ACMPU:
+ if 0 <= c && c <= 2*ppc64.BIG {
+ gins(as, n2, &n1)
+ return
+ }
+ }
+
+ // MOV n1 into register first
+ regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+ gins(ppc64.AMOVD, &n1, &ntmp)
+ gins(as, n2, &ntmp)
+ regfree(&ntmp)
+}
+
+/*
+ * set up nodes representing 2^63
+ */
+var bigi gc.Node
+
+var bigf gc.Node
+
+var bignodes_did int
+
+func bignodes() {
+ if bignodes_did != 0 {
+ return
+ }
+ bignodes_did = 1
+
+ gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 1)
+ gc.Mpshiftfix(bigi.Val.U.Xval, 63)
+
+ bigf = bigi
+ bigf.Type = gc.Types[gc.TFLOAT64]
+ bigf.Val.Ctype = gc.CTFLT
+ bigf.Val.U.Fval = new(gc.Mpflt)
+ gc.Mpmovefixflt(bigf.Val.U.Fval, bigi.Val.U.Xval)
+}
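
bigf and bigi (2^63 as a float and as an integer) support the float-to-unsigned conversion in gmove below: ppc64's FCTIDZ only converts float64 to int64, so values at or above 2^63 are biased down before the convert and re-biased after. The algorithm, as a Go-level sketch (illustrative only, not part of this CL):

	// f64toU64 is the shape of the tt == TUINT64 case in gmove:
	// convert through int64, compensating with 2^63 when needed.
	func f64toU64(f float64) uint64 {
		const two63 = 1 << 63 // float64 represents 2^63 exactly
		if f < two63 {
			return uint64(int64(f))
		}
		return uint64(int64(f-two63)) + 1<<63 // add bigi back
	}
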
+
+/*
+ * generate move:
+ * t = f
+ * hard part is conversions.
+ */
+func gmove(f *gc.Node, t *gc.Node) {
+ var a int
+ var ft int
+ var tt int
+ var cvt *gc.Type
+ var r1 gc.Node
+ var r2 gc.Node
+ var r3 gc.Node
+ var con gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ if gc.Debug['M'] != 0 {
+ fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+ }
+
+ ft = gc.Simsimtype(f.Type)
+ tt = gc.Simsimtype(t.Type)
+ cvt = t.Type
+
+ if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
+ gc.Complexmove(f, t)
+ return
+ }
+
+ // cannot have two memory operands
+ if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ goto hard
+ }
+
+ // convert constant to desired type
+ if f.Op == gc.OLITERAL {
+ switch tt {
+ default:
+ gc.Convconst(&con, t.Type, &f.Val)
+
+ case gc.TINT32,
+ gc.TINT16,
+ gc.TINT8:
+ gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val)
+ regalloc(&r1, con.Type, t)
+ gins(ppc64.AMOVD, &con, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ case gc.TUINT32,
+ gc.TUINT16,
+ gc.TUINT8:
+ gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val)
+ regalloc(&r1, con.Type, t)
+ gins(ppc64.AMOVD, &con, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+ }
+
+ f = &con
+ ft = tt // so big switch will choose a simple mov
+
+ // constants can't move directly to memory.
+ if gc.Ismem(t) != 0 {
+ goto hard
+ }
+ }
+
+ // float constants come from memory.
+ //if(isfloat[tt])
+ // goto hard;
+
+ // 64-bit immediates are also from memory.
+ //if(isint[tt])
+ // goto hard;
+ //// 64-bit immediates are really 32-bit sign-extended
+ //// unless moving into a register.
+ //if(isint[tt]) {
+ // if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
+ // goto hard;
+ // if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
+ // goto hard;
+ //}
+
+ // value -> value copy, only one memory operand.
+ // figure out the instruction to use.
+ // break out of switch for one-instruction gins.
+ // goto rdst for "destination must be register".
+ // goto hard for "convert to cvt type first".
+ // otherwise handle and return.
+
+ switch uint32(ft)<<16 | uint32(tt) {
+ default:
+ gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+ fallthrough
+
+ /*
+ * integer copy and truncate
+ */
+ case gc.TINT8<<16 | gc.TINT8, // same size
+ gc.TUINT8<<16 | gc.TINT8,
+ gc.TINT16<<16 | gc.TINT8,
+ // truncate
+ gc.TUINT16<<16 | gc.TINT8,
+ gc.TINT32<<16 | gc.TINT8,
+ gc.TUINT32<<16 | gc.TINT8,
+ gc.TINT64<<16 | gc.TINT8,
+ gc.TUINT64<<16 | gc.TINT8:
+ a = ppc64.AMOVB
+
+ case gc.TINT8<<16 | gc.TUINT8, // same size
+ gc.TUINT8<<16 | gc.TUINT8,
+ gc.TINT16<<16 | gc.TUINT8,
+ // truncate
+ gc.TUINT16<<16 | gc.TUINT8,
+ gc.TINT32<<16 | gc.TUINT8,
+ gc.TUINT32<<16 | gc.TUINT8,
+ gc.TINT64<<16 | gc.TUINT8,
+ gc.TUINT64<<16 | gc.TUINT8:
+ a = ppc64.AMOVBZ
+
+ case gc.TINT16<<16 | gc.TINT16, // same size
+ gc.TUINT16<<16 | gc.TINT16,
+ gc.TINT32<<16 | gc.TINT16,
+ // truncate
+ gc.TUINT32<<16 | gc.TINT16,
+ gc.TINT64<<16 | gc.TINT16,
+ gc.TUINT64<<16 | gc.TINT16:
+ a = ppc64.AMOVH
+
+ case gc.TINT16<<16 | gc.TUINT16, // same size
+ gc.TUINT16<<16 | gc.TUINT16,
+ gc.TINT32<<16 | gc.TUINT16,
+ // truncate
+ gc.TUINT32<<16 | gc.TUINT16,
+ gc.TINT64<<16 | gc.TUINT16,
+ gc.TUINT64<<16 | gc.TUINT16:
+ a = ppc64.AMOVHZ
+
+ case gc.TINT32<<16 | gc.TINT32, // same size
+ gc.TUINT32<<16 | gc.TINT32,
+ gc.TINT64<<16 | gc.TINT32,
+ // truncate
+ gc.TUINT64<<16 | gc.TINT32:
+ a = ppc64.AMOVW
+
+ case gc.TINT32<<16 | gc.TUINT32, // same size
+ gc.TUINT32<<16 | gc.TUINT32,
+ gc.TINT64<<16 | gc.TUINT32,
+ gc.TUINT64<<16 | gc.TUINT32:
+ a = ppc64.AMOVWZ
+
+ case gc.TINT64<<16 | gc.TINT64, // same size
+ gc.TINT64<<16 | gc.TUINT64,
+ gc.TUINT64<<16 | gc.TINT64,
+ gc.TUINT64<<16 | gc.TUINT64:
+ a = ppc64.AMOVD
+
+ /*
+ * integer up-conversions
+ */
+ case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+ gc.TINT8<<16 | gc.TUINT16,
+ gc.TINT8<<16 | gc.TINT32,
+ gc.TINT8<<16 | gc.TUINT32,
+ gc.TINT8<<16 | gc.TINT64,
+ gc.TINT8<<16 | gc.TUINT64:
+ a = ppc64.AMOVB
+
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+ gc.TUINT8<<16 | gc.TUINT16,
+ gc.TUINT8<<16 | gc.TINT32,
+ gc.TUINT8<<16 | gc.TUINT32,
+ gc.TUINT8<<16 | gc.TINT64,
+ gc.TUINT8<<16 | gc.TUINT64:
+ a = ppc64.AMOVBZ
+
+ goto rdst
+
+ case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+ gc.TINT16<<16 | gc.TUINT32,
+ gc.TINT16<<16 | gc.TINT64,
+ gc.TINT16<<16 | gc.TUINT64:
+ a = ppc64.AMOVH
+
+ goto rdst
+
+ case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+ gc.TUINT16<<16 | gc.TUINT32,
+ gc.TUINT16<<16 | gc.TINT64,
+ gc.TUINT16<<16 | gc.TUINT64:
+ a = ppc64.AMOVHZ
+
+ goto rdst
+
+ case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+ gc.TINT32<<16 | gc.TUINT64:
+ a = ppc64.AMOVW
+
+ goto rdst
+
+ case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+ gc.TUINT32<<16 | gc.TUINT64:
+ a = ppc64.AMOVWZ
+
+ goto rdst
+
+ //warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
+ //return;
+ // algorithm is:
+ // if small enough, use native float64 -> int64 conversion.
+ // otherwise, subtract 2^63, convert, and add it back.
+ /*
+ * float to integer
+ */
+ case gc.TFLOAT32<<16 | gc.TINT32,
+ gc.TFLOAT64<<16 | gc.TINT32,
+ gc.TFLOAT32<<16 | gc.TINT64,
+ gc.TFLOAT64<<16 | gc.TINT64,
+ gc.TFLOAT32<<16 | gc.TINT16,
+ gc.TFLOAT32<<16 | gc.TINT8,
+ gc.TFLOAT32<<16 | gc.TUINT16,
+ gc.TFLOAT32<<16 | gc.TUINT8,
+ gc.TFLOAT64<<16 | gc.TINT16,
+ gc.TFLOAT64<<16 | gc.TINT8,
+ gc.TFLOAT64<<16 | gc.TUINT16,
+ gc.TFLOAT64<<16 | gc.TUINT8,
+ gc.TFLOAT32<<16 | gc.TUINT32,
+ gc.TFLOAT64<<16 | gc.TUINT32,
+ gc.TFLOAT32<<16 | gc.TUINT64,
+ gc.TFLOAT64<<16 | gc.TUINT64:
+ bignodes()
+
+ regalloc(&r1, gc.Types[ft], f)
+ gmove(f, &r1)
+ if tt == gc.TUINT64 {
+ regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+ gmove(&bigf, &r2)
+ gins(ppc64.AFCMPU, &r1, &r2)
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
+ gins(ppc64.AFSUB, &r2, &r1)
+ gc.Patch(p1, gc.Pc)
+ regfree(&r2)
+ }
+
+ regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+ regalloc(&r3, gc.Types[gc.TINT64], t)
+ gins(ppc64.AFCTIDZ, &r1, &r2)
+ p1 = gins(ppc64.AFMOVD, &r2, nil)
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = ppc64.REGSP
+ p1.To.Offset = -8
+ p1 = gins(ppc64.AMOVD, nil, &r3)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = ppc64.REGSP
+ p1.From.Offset = -8
+ regfree(&r2)
+ regfree(&r1)
+ if tt == gc.TUINT64 {
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1) // use CR0 here again
+ gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
+ gins(ppc64.AMOVD, &bigi, &r1)
+ gins(ppc64.AADD, &r1, &r3)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ gmove(&r3, t)
+ regfree(&r3)
+ return
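+
+ // Illustrative trace (values chosen for illustration, not generated
+ // code): converting f = 2^63 + 4096 to uint64, f >= bigf, so the FSUB
+ // above computes f - 2^63 = 4096, FCTIDZ yields 4096, and the bigi
+ // fixup adds 2^63 back, producing 0x8000000000001000.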
+
+ //warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
+ //return;
+ // algorithm is:
+ // if small enough, use native int64 -> uint64 conversion.
+ // otherwise, halve (rounding to odd?), convert, and double.
+ /*
+ * integer to float
+ */
+ case gc.TINT32<<16 | gc.TFLOAT32,
+ gc.TINT32<<16 | gc.TFLOAT64,
+ gc.TINT64<<16 | gc.TFLOAT32,
+ gc.TINT64<<16 | gc.TFLOAT64,
+ gc.TINT16<<16 | gc.TFLOAT32,
+ gc.TINT16<<16 | gc.TFLOAT64,
+ gc.TINT8<<16 | gc.TFLOAT32,
+ gc.TINT8<<16 | gc.TFLOAT64,
+ gc.TUINT16<<16 | gc.TFLOAT32,
+ gc.TUINT16<<16 | gc.TFLOAT64,
+ gc.TUINT8<<16 | gc.TFLOAT32,
+ gc.TUINT8<<16 | gc.TFLOAT64,
+ gc.TUINT32<<16 | gc.TFLOAT32,
+ gc.TUINT32<<16 | gc.TFLOAT64,
+ gc.TUINT64<<16 | gc.TFLOAT32,
+ gc.TUINT64<<16 | gc.TFLOAT64:
+ bignodes()
+
+ regalloc(&r1, gc.Types[gc.TINT64], nil)
+ gmove(f, &r1)
+ if ft == gc.TUINT64 {
+ gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
+ gmove(&bigi, &r2)
+ gins(ppc64.ACMPU, &r1, &r2)
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
+ p2 = gins(ppc64.ASRD, nil, &r1)
+ p2.From.Type = obj.TYPE_CONST
+ p2.From.Offset = 1
+ gc.Patch(p1, gc.Pc)
+ }
+
+ regalloc(&r2, gc.Types[gc.TFLOAT64], t)
+ p1 = gins(ppc64.AMOVD, &r1, nil)
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = ppc64.REGSP
+ p1.To.Offset = -8
+ p1 = gins(ppc64.AFMOVD, nil, &r2)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = ppc64.REGSP
+ p1.From.Offset = -8
+ gins(ppc64.AFCFID, &r2, &r2)
+ regfree(&r1)
+ if ft == gc.TUINT64 {
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
+ gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
+ gins(ppc64.AFMUL, &r1, &r2)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ gmove(&r2, t)
+ regfree(&r2)
+ return
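+
+ // Illustrative trace (values chosen for illustration): converting
+ // u = 2^63 + 2 to float64, u >= bigi, so the SRD above halves it to
+ // 2^62 + 1, FCFID converts, and the FMUL by FREGTWO doubles the result
+ // back to 2^63; the dropped low bit is below float64 precision at this
+ // magnitude.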
+
+ /*
+ * float to float
+ */
+ case gc.TFLOAT32<<16 | gc.TFLOAT32:
+ a = ppc64.AFMOVS
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT64:
+ a = ppc64.AFMOVD
+
+ case gc.TFLOAT32<<16 | gc.TFLOAT64:
+ a = ppc64.AFMOVS
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT32:
+ a = ppc64.AFRSP
+ goto rdst
+ }
+
+ gins(a, f, t)
+ return
+
+ // requires register destination
+rdst:
+ regalloc(&r1, t.Type, t)
+
+ gins(a, f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ // requires register intermediate
+hard:
+ regalloc(&r1, cvt, t)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+}
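+
+// A sketch of the dispatch in the big switch above: source and destination
+// simple types are packed into a single uint32 key, e.g. (pairing chosen
+// for illustration) uint32(gc.TINT32)<<16 | uint32(gc.TINT64) matches the
+// "sign extend int32" case, so a = ppc64.AMOVW and control reaches rdst
+// to force a register destination.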
+
+/*
+ * generate one instruction:
+ * as f, t
+ */
+func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+ var w int32
+ var p *obj.Prog
+ var af obj.Addr
+ var at obj.Addr
+
+ // TODO(austin): Add self-move test like in 6g (but be careful
+ // of truncation moves)
+
+ af = obj.Addr{}
+
+ at = obj.Addr{}
+ if f != nil {
+ gc.Naddr(f, &af, 1)
+ }
+ if t != nil {
+ gc.Naddr(t, &at, 1)
+ }
+ p = gc.Prog(as)
+ if f != nil {
+ p.From = af
+ }
+ if t != nil {
+ p.To = at
+ }
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v\n", p)
+ }
+
+ w = 0
+ switch as {
+ case ppc64.AMOVB,
+ ppc64.AMOVBU,
+ ppc64.AMOVBZ,
+ ppc64.AMOVBZU:
+ w = 1
+
+ case ppc64.AMOVH,
+ ppc64.AMOVHU,
+ ppc64.AMOVHZ,
+ ppc64.AMOVHZU:
+ w = 2
+
+ case ppc64.AMOVW,
+ ppc64.AMOVWU,
+ ppc64.AMOVWZ,
+ ppc64.AMOVWZU:
+ w = 4
+
+ case ppc64.AMOVD,
+ ppc64.AMOVDU:
+ if af.Type == obj.TYPE_CONST || af.Type == obj.TYPE_ADDR {
+ break
+ }
+ w = 8
+ }
+
+ if w != 0 && ((f != nil && af.Width < int64(w)) || (t != nil && at.Type != obj.TYPE_REG && at.Width > int64(w))) {
+ gc.Dump("f", f)
+ gc.Dump("t", t)
+ gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width)
+ }
+
+ return p
+}
+
+func fixlargeoffset(n *gc.Node) {
+ var a gc.Node
+
+ if n == nil {
+ return
+ }
+ if n.Op != gc.OINDREG {
+ return
+ }
+ if n.Val.U.Reg == ppc64.REGSP { // stack offset cannot be large
+ return
+ }
+ if n.Xoffset != int64(int32(n.Xoffset)) {
+ // TODO(minux): offset too large, move into R31 and add to R31 instead.
+ // this is used only in test/fixedbugs/issue6036.go.
+ gc.Fatal("offset too large: %v", gc.Nconv(n, 0))
+
+ a = *n
+ a.Op = gc.OREGISTER
+ a.Type = gc.Types[gc.Tptr]
+ a.Xoffset = 0
+ gc.Cgen_checknil(&a)
+ ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
+ n.Xoffset = 0
+ }
+}
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
+func optoas(op int, t *gc.Type) int {
+ var a int
+
+ if t == nil {
+ gc.Fatal("optoas: t is nil")
+ }
+
+ a = obj.AXXX
+ switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+ default:
+ gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+ case gc.OEQ<<16 | gc.TBOOL,
+ gc.OEQ<<16 | gc.TINT8,
+ gc.OEQ<<16 | gc.TUINT8,
+ gc.OEQ<<16 | gc.TINT16,
+ gc.OEQ<<16 | gc.TUINT16,
+ gc.OEQ<<16 | gc.TINT32,
+ gc.OEQ<<16 | gc.TUINT32,
+ gc.OEQ<<16 | gc.TINT64,
+ gc.OEQ<<16 | gc.TUINT64,
+ gc.OEQ<<16 | gc.TPTR32,
+ gc.OEQ<<16 | gc.TPTR64,
+ gc.OEQ<<16 | gc.TFLOAT32,
+ gc.OEQ<<16 | gc.TFLOAT64:
+ a = ppc64.ABEQ
+
+ case gc.ONE<<16 | gc.TBOOL,
+ gc.ONE<<16 | gc.TINT8,
+ gc.ONE<<16 | gc.TUINT8,
+ gc.ONE<<16 | gc.TINT16,
+ gc.ONE<<16 | gc.TUINT16,
+ gc.ONE<<16 | gc.TINT32,
+ gc.ONE<<16 | gc.TUINT32,
+ gc.ONE<<16 | gc.TINT64,
+ gc.ONE<<16 | gc.TUINT64,
+ gc.ONE<<16 | gc.TPTR32,
+ gc.ONE<<16 | gc.TPTR64,
+ gc.ONE<<16 | gc.TFLOAT32,
+ gc.ONE<<16 | gc.TFLOAT64:
+ a = ppc64.ABNE
+
+ case gc.OLT<<16 | gc.TINT8, // ACMP
+ gc.OLT<<16 | gc.TINT16,
+ gc.OLT<<16 | gc.TINT32,
+ gc.OLT<<16 | gc.TINT64,
+ gc.OLT<<16 | gc.TUINT8,
+ // ACMPU
+ gc.OLT<<16 | gc.TUINT16,
+ gc.OLT<<16 | gc.TUINT32,
+ gc.OLT<<16 | gc.TUINT64,
+ gc.OLT<<16 | gc.TFLOAT32,
+ // AFCMPU
+ gc.OLT<<16 | gc.TFLOAT64:
+ a = ppc64.ABLT
+
+ case gc.OLE<<16 | gc.TINT8, // ACMP
+ gc.OLE<<16 | gc.TINT16,
+ gc.OLE<<16 | gc.TINT32,
+ gc.OLE<<16 | gc.TINT64,
+ gc.OLE<<16 | gc.TUINT8,
+ // ACMPU
+ gc.OLE<<16 | gc.TUINT16,
+ gc.OLE<<16 | gc.TUINT32,
+ gc.OLE<<16 | gc.TUINT64,
+ gc.OLE<<16 | gc.TFLOAT32,
+ // AFCMPU
+ gc.OLE<<16 | gc.TFLOAT64:
+ a = ppc64.ABLE
+
+ case gc.OGT<<16 | gc.TINT8,
+ gc.OGT<<16 | gc.TINT16,
+ gc.OGT<<16 | gc.TINT32,
+ gc.OGT<<16 | gc.TINT64,
+ gc.OGT<<16 | gc.TUINT8,
+ gc.OGT<<16 | gc.TUINT16,
+ gc.OGT<<16 | gc.TUINT32,
+ gc.OGT<<16 | gc.TUINT64,
+ gc.OGT<<16 | gc.TFLOAT32,
+ gc.OGT<<16 | gc.TFLOAT64:
+ a = ppc64.ABGT
+
+ case gc.OGE<<16 | gc.TINT8,
+ gc.OGE<<16 | gc.TINT16,
+ gc.OGE<<16 | gc.TINT32,
+ gc.OGE<<16 | gc.TINT64,
+ gc.OGE<<16 | gc.TUINT8,
+ gc.OGE<<16 | gc.TUINT16,
+ gc.OGE<<16 | gc.TUINT32,
+ gc.OGE<<16 | gc.TUINT64,
+ gc.OGE<<16 | gc.TFLOAT32,
+ gc.OGE<<16 | gc.TFLOAT64:
+ a = ppc64.ABGE
+
+ case gc.OCMP<<16 | gc.TBOOL,
+ gc.OCMP<<16 | gc.TINT8,
+ gc.OCMP<<16 | gc.TINT16,
+ gc.OCMP<<16 | gc.TINT32,
+ gc.OCMP<<16 | gc.TPTR32,
+ gc.OCMP<<16 | gc.TINT64:
+ a = ppc64.ACMP
+
+ case gc.OCMP<<16 | gc.TUINT8,
+ gc.OCMP<<16 | gc.TUINT16,
+ gc.OCMP<<16 | gc.TUINT32,
+ gc.OCMP<<16 | gc.TUINT64,
+ gc.OCMP<<16 | gc.TPTR64:
+ a = ppc64.ACMPU
+
+ case gc.OCMP<<16 | gc.TFLOAT32,
+ gc.OCMP<<16 | gc.TFLOAT64:
+ a = ppc64.AFCMPU
+
+ case gc.OAS<<16 | gc.TBOOL,
+ gc.OAS<<16 | gc.TINT8:
+ a = ppc64.AMOVB
+
+ case gc.OAS<<16 | gc.TUINT8:
+ a = ppc64.AMOVBZ
+
+ case gc.OAS<<16 | gc.TINT16:
+ a = ppc64.AMOVH
+
+ case gc.OAS<<16 | gc.TUINT16:
+ a = ppc64.AMOVHZ
+
+ case gc.OAS<<16 | gc.TINT32:
+ a = ppc64.AMOVW
+
+ case gc.OAS<<16 | gc.TUINT32,
+ gc.OAS<<16 | gc.TPTR32:
+ a = ppc64.AMOVWZ
+
+ case gc.OAS<<16 | gc.TINT64,
+ gc.OAS<<16 | gc.TUINT64,
+ gc.OAS<<16 | gc.TPTR64:
+ a = ppc64.AMOVD
+
+ case gc.OAS<<16 | gc.TFLOAT32:
+ a = ppc64.AFMOVS
+
+ case gc.OAS<<16 | gc.TFLOAT64:
+ a = ppc64.AFMOVD
+
+ case gc.OADD<<16 | gc.TINT8,
+ gc.OADD<<16 | gc.TUINT8,
+ gc.OADD<<16 | gc.TINT16,
+ gc.OADD<<16 | gc.TUINT16,
+ gc.OADD<<16 | gc.TINT32,
+ gc.OADD<<16 | gc.TUINT32,
+ gc.OADD<<16 | gc.TPTR32,
+ gc.OADD<<16 | gc.TINT64,
+ gc.OADD<<16 | gc.TUINT64,
+ gc.OADD<<16 | gc.TPTR64:
+ a = ppc64.AADD
+
+ case gc.OADD<<16 | gc.TFLOAT32:
+ a = ppc64.AFADDS
+
+ case gc.OADD<<16 | gc.TFLOAT64:
+ a = ppc64.AFADD
+
+ case gc.OSUB<<16 | gc.TINT8,
+ gc.OSUB<<16 | gc.TUINT8,
+ gc.OSUB<<16 | gc.TINT16,
+ gc.OSUB<<16 | gc.TUINT16,
+ gc.OSUB<<16 | gc.TINT32,
+ gc.OSUB<<16 | gc.TUINT32,
+ gc.OSUB<<16 | gc.TPTR32,
+ gc.OSUB<<16 | gc.TINT64,
+ gc.OSUB<<16 | gc.TUINT64,
+ gc.OSUB<<16 | gc.TPTR64:
+ a = ppc64.ASUB
+
+ case gc.OSUB<<16 | gc.TFLOAT32:
+ a = ppc64.AFSUBS
+
+ case gc.OSUB<<16 | gc.TFLOAT64:
+ a = ppc64.AFSUB
+
+ case gc.OMINUS<<16 | gc.TINT8,
+ gc.OMINUS<<16 | gc.TUINT8,
+ gc.OMINUS<<16 | gc.TINT16,
+ gc.OMINUS<<16 | gc.TUINT16,
+ gc.OMINUS<<16 | gc.TINT32,
+ gc.OMINUS<<16 | gc.TUINT32,
+ gc.OMINUS<<16 | gc.TPTR32,
+ gc.OMINUS<<16 | gc.TINT64,
+ gc.OMINUS<<16 | gc.TUINT64,
+ gc.OMINUS<<16 | gc.TPTR64:
+ a = ppc64.ANEG
+
+ case gc.OAND<<16 | gc.TINT8,
+ gc.OAND<<16 | gc.TUINT8,
+ gc.OAND<<16 | gc.TINT16,
+ gc.OAND<<16 | gc.TUINT16,
+ gc.OAND<<16 | gc.TINT32,
+ gc.OAND<<16 | gc.TUINT32,
+ gc.OAND<<16 | gc.TPTR32,
+ gc.OAND<<16 | gc.TINT64,
+ gc.OAND<<16 | gc.TUINT64,
+ gc.OAND<<16 | gc.TPTR64:
+ a = ppc64.AAND
+
+ case gc.OOR<<16 | gc.TINT8,
+ gc.OOR<<16 | gc.TUINT8,
+ gc.OOR<<16 | gc.TINT16,
+ gc.OOR<<16 | gc.TUINT16,
+ gc.OOR<<16 | gc.TINT32,
+ gc.OOR<<16 | gc.TUINT32,
+ gc.OOR<<16 | gc.TPTR32,
+ gc.OOR<<16 | gc.TINT64,
+ gc.OOR<<16 | gc.TUINT64,
+ gc.OOR<<16 | gc.TPTR64:
+ a = ppc64.AOR
+
+ case gc.OXOR<<16 | gc.TINT8,
+ gc.OXOR<<16 | gc.TUINT8,
+ gc.OXOR<<16 | gc.TINT16,
+ gc.OXOR<<16 | gc.TUINT16,
+ gc.OXOR<<16 | gc.TINT32,
+ gc.OXOR<<16 | gc.TUINT32,
+ gc.OXOR<<16 | gc.TPTR32,
+ gc.OXOR<<16 | gc.TINT64,
+ gc.OXOR<<16 | gc.TUINT64,
+ gc.OXOR<<16 | gc.TPTR64:
+ a = ppc64.AXOR
+
+ // TODO(minux): handle rotates
+ //case CASE(OLROT, TINT8):
+ //case CASE(OLROT, TUINT8):
+ //case CASE(OLROT, TINT16):
+ //case CASE(OLROT, TUINT16):
+ //case CASE(OLROT, TINT32):
+ //case CASE(OLROT, TUINT32):
+ //case CASE(OLROT, TPTR32):
+ //case CASE(OLROT, TINT64):
+ //case CASE(OLROT, TUINT64):
+ //case CASE(OLROT, TPTR64):
+ // a = 0//???; RLDC?
+ // break;
+
+ case gc.OLSH<<16 | gc.TINT8,
+ gc.OLSH<<16 | gc.TUINT8,
+ gc.OLSH<<16 | gc.TINT16,
+ gc.OLSH<<16 | gc.TUINT16,
+ gc.OLSH<<16 | gc.TINT32,
+ gc.OLSH<<16 | gc.TUINT32,
+ gc.OLSH<<16 | gc.TPTR32,
+ gc.OLSH<<16 | gc.TINT64,
+ gc.OLSH<<16 | gc.TUINT64,
+ gc.OLSH<<16 | gc.TPTR64:
+ a = ppc64.ASLD
+
+ case gc.ORSH<<16 | gc.TUINT8,
+ gc.ORSH<<16 | gc.TUINT16,
+ gc.ORSH<<16 | gc.TUINT32,
+ gc.ORSH<<16 | gc.TPTR32,
+ gc.ORSH<<16 | gc.TUINT64,
+ gc.ORSH<<16 | gc.TPTR64:
+ a = ppc64.ASRD
+
+ case gc.ORSH<<16 | gc.TINT8,
+ gc.ORSH<<16 | gc.TINT16,
+ gc.ORSH<<16 | gc.TINT32,
+ gc.ORSH<<16 | gc.TINT64:
+ a = ppc64.ASRAD
+
+ // TODO(minux): handle rotates
+ //case CASE(ORROTC, TINT8):
+ //case CASE(ORROTC, TUINT8):
+ //case CASE(ORROTC, TINT16):
+ //case CASE(ORROTC, TUINT16):
+ //case CASE(ORROTC, TINT32):
+ //case CASE(ORROTC, TUINT32):
+ //case CASE(ORROTC, TINT64):
+ //case CASE(ORROTC, TUINT64):
+ // a = 0//??? RLDC??
+ // break;
+
+ case gc.OHMUL<<16 | gc.TINT64:
+ a = ppc64.AMULHD
+
+ case gc.OHMUL<<16 | gc.TUINT64,
+ gc.OHMUL<<16 | gc.TPTR64:
+ a = ppc64.AMULHDU
+
+ case gc.OMUL<<16 | gc.TINT8,
+ gc.OMUL<<16 | gc.TINT16,
+ gc.OMUL<<16 | gc.TINT32,
+ gc.OMUL<<16 | gc.TINT64:
+ a = ppc64.AMULLD
+
+ case gc.OMUL<<16 | gc.TUINT8,
+ gc.OMUL<<16 | gc.TUINT16,
+ gc.OMUL<<16 | gc.TUINT32,
+ gc.OMUL<<16 | gc.TPTR32,
+ // don't use word multiply, the high 32 bits are undefined.
+ // fallthrough
+ gc.OMUL<<16 | gc.TUINT64,
+ gc.OMUL<<16 | gc.TPTR64:
+ a = ppc64.AMULLD
+ // for 64-bit multiplies, signedness doesn't matter.
+
+ case gc.OMUL<<16 | gc.TFLOAT32:
+ a = ppc64.AFMULS
+
+ case gc.OMUL<<16 | gc.TFLOAT64:
+ a = ppc64.AFMUL
+
+ case gc.ODIV<<16 | gc.TINT8,
+ gc.ODIV<<16 | gc.TINT16,
+ gc.ODIV<<16 | gc.TINT32,
+ gc.ODIV<<16 | gc.TINT64:
+ a = ppc64.ADIVD
+
+ case gc.ODIV<<16 | gc.TUINT8,
+ gc.ODIV<<16 | gc.TUINT16,
+ gc.ODIV<<16 | gc.TUINT32,
+ gc.ODIV<<16 | gc.TPTR32,
+ gc.ODIV<<16 | gc.TUINT64,
+ gc.ODIV<<16 | gc.TPTR64:
+ a = ppc64.ADIVDU
+
+ case gc.ODIV<<16 | gc.TFLOAT32:
+ a = ppc64.AFDIVS
+
+ case gc.ODIV<<16 | gc.TFLOAT64:
+ a = ppc64.AFDIV
+ }
+
+ return a
+}
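+
+// For example (a sketch): optoas(gc.OADD, gc.Types[gc.TINT64]) builds the
+// key gc.OADD<<16 | gc.TINT64 and returns ppc64.AADD, while the same op
+// on gc.Types[gc.TFLOAT64] yields ppc64.AFADD.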
+
+const (
+ ODynam = 1 << 0
+ OAddable = 1 << 1
+)
+
+func xgen(n *gc.Node, a *gc.Node, o int) int {
+ // TODO(minux)
+
+ return -1
+}
+
+func sudoclean() {
+ return
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return 0 on failure, 1 on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
+ // TODO(minux)
+
+ *a = obj.Addr{}
+ return 0
+}
diff --git a/src/cmd/new9g/opt.go b/src/cmd/new9g/opt.go
new file mode 100644
index 0000000000..a0294209aa
--- /dev/null
+++ b/src/cmd/new9g/opt.go
@@ -0,0 +1,42 @@
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Many Power ISA arithmetic and logical instructions come in four
+// standard variants. These bits let us map between variants.
+const (
+ V_CC = 1 << 0
+ V_V = 1 << 1
+)
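+
+// For example, ADD's four variants map to these flag combinations (see
+// varianttable in prog.go):
+//
+// variant2as(ppc64.AADD, 0)         == ppc64.AADD     // base form
+// variant2as(ppc64.AADD, V_CC)      == ppc64.AADDCC   // also sets CR0
+// variant2as(ppc64.AADD, V_V)       == ppc64.AADDV    // also records overflow
+// variant2as(ppc64.AADD, V_CC|V_V)  == ppc64.AADDVCC  // both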
diff --git a/src/cmd/new9g/peep.go b/src/cmd/new9g/peep.go
new file mode 100644
index 0000000000..de3d7c348a
--- /dev/null
+++ b/src/cmd/new9g/peep.go
@@ -0,0 +1,1071 @@
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+
+var gactive uint32
+
+func peep(firstp *obj.Prog) {
+ var g *gc.Graph
+ var r *gc.Flow
+ var r1 *gc.Flow
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var t int
+
+ g = gc.Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+ gactive = 0
+
+loop1:
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("loop1", g.Start, 0)
+ }
+
+ t = 0
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+
+ // TODO(austin) Handle smaller moves. arm and amd64
+ // distinguish between moves that *must*
+ // sign/zero extend and moves that don't care so they
+ // can eliminate moves that don't care without
+ // breaking moves that do care. This might let us
+ // simplify or remove the next peep loop, too.
+ if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
+ if regtyp(&p.To) != 0 {
+ // Try to eliminate reg->reg moves
+ if regtyp(&p.From) != 0 {
+ if p.From.Type == p.To.Type {
+ if copyprop(r) != 0 {
+ excise(r)
+ t++
+ } else if subprop(r) != 0 && copyprop(r) != 0 {
+ excise(r)
+ t++
+ }
+ }
+ }
+
+ // Convert uses to $0 to uses of R0 and
+ // propagate R0
+ if regzer(&p.From) != 0 {
+ if p.To.Type == obj.TYPE_REG {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGZERO
+ if copyprop(r) != 0 {
+ excise(r)
+ t++
+ } else if subprop(r) != 0 && copyprop(r) != 0 {
+ excise(r)
+ t++
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if t != 0 {
+ goto loop1
+ }
+
+ /*
+ * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
+ */
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ default:
+ continue
+
+ case ppc64.AMOVH,
+ ppc64.AMOVHZ,
+ ppc64.AMOVB,
+ ppc64.AMOVBZ,
+ ppc64.AMOVW,
+ ppc64.AMOVWZ:
+ if p.To.Type != obj.TYPE_REG {
+ continue
+ }
+ }
+
+ r1 = r.Link
+ if r1 == nil {
+ continue
+ }
+ p1 = r1.Prog
+ if p1.As != p.As {
+ continue
+ }
+ if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
+ continue
+ }
+ if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
+ continue
+ }
+ excise(r1)
+ }
+
+ if gc.Debug['D'] > 1 {
+ goto ret /* allow following code improvement to be suppressed */
+ }
+
+ /*
+ * look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
+ * when OP can set condition codes correctly
+ */
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ case ppc64.ACMP,
+ ppc64.ACMPW: /* always safe? */
+ if !(regzer(&p.To) != 0) {
+ continue
+ }
+ r1 = r.S1
+ if r1 == nil {
+ continue
+ }
+ switch r1.Prog.As {
+ default:
+ continue
+
+ /* the conditions can be complex and these are currently little used */
+ case ppc64.ABCL,
+ ppc64.ABC:
+ continue
+
+ case ppc64.ABEQ,
+ ppc64.ABGE,
+ ppc64.ABGT,
+ ppc64.ABLE,
+ ppc64.ABLT,
+ ppc64.ABNE,
+ ppc64.ABVC,
+ ppc64.ABVS:
+ break
+ }
+
+ r1 = r
+ for {
+ r1 = gc.Uniqp(r1)
+ if !(r1 != nil && r1.Prog.As == obj.ANOP) {
+ break
+ }
+ }
+
+ if r1 == nil {
+ continue
+ }
+ p1 = r1.Prog
+ if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.From.Reg {
+ continue
+ }
+ switch p1.As {
+ /* irregular instructions */
+ case ppc64.ASUB,
+ ppc64.AADD,
+ ppc64.AXOR,
+ ppc64.AOR:
+ if p1.From.Type == obj.TYPE_CONST || p1.From.Type == obj.TYPE_ADDR {
+ continue
+ }
+ }
+
+ switch p1.As {
+ default:
+ continue
+
+ case ppc64.AMOVW,
+ ppc64.AMOVD:
+ if p1.From.Type != obj.TYPE_REG {
+ continue
+ }
+ continue
+
+ case ppc64.AANDCC,
+ ppc64.AANDNCC,
+ ppc64.AORCC,
+ ppc64.AORNCC,
+ ppc64.AXORCC,
+ ppc64.ASUBCC,
+ ppc64.ASUBECC,
+ ppc64.ASUBMECC,
+ ppc64.ASUBZECC,
+ ppc64.AADDCC,
+ ppc64.AADDCCC,
+ ppc64.AADDECC,
+ ppc64.AADDMECC,
+ ppc64.AADDZECC,
+ ppc64.ARLWMICC,
+ ppc64.ARLWNMCC,
+ /* don't deal with floating point instructions for now */
+ /*
+ case AFABS:
+ case AFADD:
+ case AFADDS:
+ case AFCTIW:
+ case AFCTIWZ:
+ case AFDIV:
+ case AFDIVS:
+ case AFMADD:
+ case AFMADDS:
+ case AFMOVD:
+ case AFMSUB:
+ case AFMSUBS:
+ case AFMUL:
+ case AFMULS:
+ case AFNABS:
+ case AFNEG:
+ case AFNMADD:
+ case AFNMADDS:
+ case AFNMSUB:
+ case AFNMSUBS:
+ case AFRSP:
+ case AFSUB:
+ case AFSUBS:
+ case ACNTLZW:
+ case AMTFSB0:
+ case AMTFSB1:
+ */
+ ppc64.AADD,
+ ppc64.AADDV,
+ ppc64.AADDC,
+ ppc64.AADDCV,
+ ppc64.AADDME,
+ ppc64.AADDMEV,
+ ppc64.AADDE,
+ ppc64.AADDEV,
+ ppc64.AADDZE,
+ ppc64.AADDZEV,
+ ppc64.AAND,
+ ppc64.AANDN,
+ ppc64.ADIVW,
+ ppc64.ADIVWV,
+ ppc64.ADIVWU,
+ ppc64.ADIVWUV,
+ ppc64.ADIVD,
+ ppc64.ADIVDV,
+ ppc64.ADIVDU,
+ ppc64.ADIVDUV,
+ ppc64.AEQV,
+ ppc64.AEXTSB,
+ ppc64.AEXTSH,
+ ppc64.AEXTSW,
+ ppc64.AMULHW,
+ ppc64.AMULHWU,
+ ppc64.AMULLW,
+ ppc64.AMULLWV,
+ ppc64.AMULHD,
+ ppc64.AMULHDU,
+ ppc64.AMULLD,
+ ppc64.AMULLDV,
+ ppc64.ANAND,
+ ppc64.ANEG,
+ ppc64.ANEGV,
+ ppc64.ANOR,
+ ppc64.AOR,
+ ppc64.AORN,
+ ppc64.AREM,
+ ppc64.AREMV,
+ ppc64.AREMU,
+ ppc64.AREMUV,
+ ppc64.AREMD,
+ ppc64.AREMDV,
+ ppc64.AREMDU,
+ ppc64.AREMDUV,
+ ppc64.ARLWMI,
+ ppc64.ARLWNM,
+ ppc64.ASLW,
+ ppc64.ASRAW,
+ ppc64.ASRW,
+ ppc64.ASLD,
+ ppc64.ASRAD,
+ ppc64.ASRD,
+ ppc64.ASUB,
+ ppc64.ASUBV,
+ ppc64.ASUBC,
+ ppc64.ASUBCV,
+ ppc64.ASUBME,
+ ppc64.ASUBMEV,
+ ppc64.ASUBE,
+ ppc64.ASUBEV,
+ ppc64.ASUBZE,
+ ppc64.ASUBZEV,
+ ppc64.AXOR:
+ t = variant2as(int(p1.As), as2variant(int(p1.As))|V_CC)
+ }
+
+ if gc.Debug['D'] != 0 {
+ fmt.Printf("cmp %v; %v -> ", p1, p)
+ }
+ p1.As = int16(t)
+ if gc.Debug['D'] != 0 {
+ fmt.Printf("%v\n", p1)
+ }
+ excise(r)
+ continue
+ }
+ }
+
+ret:
+ gc.Flowend(g)
+}
+
+func excise(r *gc.Flow) {
+ var p *obj.Prog
+
+ p = r.Prog
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("%v ===delete===\n", p)
+ }
+ obj.Nopout(p)
+ gc.Ostats.Ndelmov++
+}
+
+/*
+ * regzer returns 1 if a's value is 0 (a is R0 or $0)
+ */
+func regzer(a *obj.Addr) int {
+ if a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_ADDR {
+ if a.Sym == nil && a.Reg == 0 {
+ if a.Offset == 0 {
+ return 1
+ }
+ }
+ }
+ if a.Type == obj.TYPE_REG {
+ if a.Reg == ppc64.REGZERO {
+ return 1
+ }
+ }
+ return 0
+}
+
+func regtyp(a *obj.Addr) int {
+ // TODO(rsc): Floating point register exclusions?
+ return bool2int(a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO)
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ * MOV a, R1
+ * ADD b, R1 / no use of R2
+ * MOV R1, R2
+ * would be converted to
+ * MOV a, R2
+ * ADD b, R2
+ * MOV R2, R1
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ *
+ * r0 (the argument, not the register) is the MOV at the end of the
+ * above sequences. This returns 1 if it modified any instructions.
+ */
+func subprop(r0 *gc.Flow) int {
+ var p *obj.Prog
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+ var r *gc.Flow
+ var t int
+ var info gc.ProgInfo
+
+ p = r0.Prog
+ v1 = &p.From
+ if !(regtyp(v1) != 0) {
+ return 0
+ }
+ v2 = &p.To
+ if !(regtyp(v2) != 0) {
+ return 0
+ }
+ for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ if gc.Uniqs(r) == nil {
+ break
+ }
+ p = r.Prog
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+ proginfo(&info, p)
+ if info.Flags&gc.Call != 0 {
+ return 0
+ }
+
+ if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
+ if p.To.Type == v1.Type {
+ if p.To.Reg == v1.Reg {
+ goto gotit
+ }
+ }
+ }
+
+ if copyau(&p.From, v2) != 0 || copyau1(p, v2) != 0 || copyau(&p.To, v2) != 0 {
+ break
+ }
+ if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+ break
+ }
+ }
+
+ return 0
+
+gotit:
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub1(p, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t = int(v1.Reg)
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return 1
+}
+
+/*
+ * The idea is to remove redundant copies.
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * use v2 return fail (v1->v2 move must remain)
+ * -----------------
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * set v2 return success (caller can remove v1->v2 move)
+ */
+func copyprop(r0 *gc.Flow) int {
+ var p *obj.Prog
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+
+ p = r0.Prog
+ v1 = &p.From
+ v2 = &p.To
+ if copyas(v1, v2) != 0 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("eliminating self-move\n", r0.Prog)
+ }
+ return 1
+ }
+
+ gactive++
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
+ }
+ return copy1(v1, v2, r0.S1, 0)
+}
+
+// copy1 replaces uses of v2 with v1 starting at r and returns 1 if
+// all uses were rewritten.
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+ var t int
+ var p *obj.Prog
+
+ if uint32(r.Active) == gactive {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("act set; return 1\n")
+ }
+ return 1
+ }
+
+ r.Active = int32(gactive)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("copy1 replace %v with %v f=%d\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
+ }
+ for ; r != nil; r = r.S1 {
+ p = r.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ if !(f != 0) && gc.Uniqp(r) == nil {
+ // Multiple predecessors; conservatively
+ // assume v1 was set on other path
+ f = 1
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; merge; f=%d", f)
+ }
+ }
+
+ t = copyu(p, v2, nil)
+ switch t {
+ case 2: /* rar, can't split */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+ }
+ return 0
+
+ case 3: /* set */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return 1
+
+ case 1, /* used, substitute */
+ 4: /* use and set */
+ if f != 0 {
+ if !(gc.Debug['P'] != 0) {
+ return 0
+ }
+ if t == 4 {
+ fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ } else {
+ fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ }
+ return 0
+ }
+
+ if copyu(p, v2, v1) != 0 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub fail; return 0\n")
+ }
+ return 0
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
+ }
+ if t == 4 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return 1
+ }
+ }
+
+ if !(f != 0) {
+ t = copyu(p, v1, nil)
+ if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+ }
+ }
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n")
+ }
+ if r.S2 != nil {
+ if !(copy1(v1, v2, r.S2, f) != 0) {
+ return 0
+ }
+ }
+ }
+
+ return 1
+}
+
+// If s==nil, copyu returns the set/use of v in p; otherwise, it
+// modifies p to replace reads of v with reads of s and returns 0 for
+// success or non-zero for failure.
+//
+// If s==nil, copyu returns one of the following values:
+// 1 if v only used
+// 2 if v is set and used in one address (read-alter-rewrite;
+// can't substitute)
+// 3 if v is only set
+// 4 if v is set in one address and used in another (so addresses
+// can be rewritten independently)
+// 0 otherwise (not touched)
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+ if p.From3.Type != obj.TYPE_NONE {
+ // 9g never generates a from3
+ fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(&p.From3))
+ }
+
+ switch p.As {
+ default:
+ fmt.Printf("copyu: can't find %v\n", ppc64.Aconv(int(p.As)))
+ return 2
+
+ case obj.ANOP, /* read p->from, write p->to */
+ ppc64.AMOVH,
+ ppc64.AMOVHZ,
+ ppc64.AMOVB,
+ ppc64.AMOVBZ,
+ ppc64.AMOVW,
+ ppc64.AMOVWZ,
+ ppc64.AMOVD,
+ ppc64.ANEG,
+ ppc64.ANEGCC,
+ ppc64.AADDME,
+ ppc64.AADDMECC,
+ ppc64.AADDZE,
+ ppc64.AADDZECC,
+ ppc64.ASUBME,
+ ppc64.ASUBMECC,
+ ppc64.ASUBZE,
+ ppc64.ASUBZECC,
+ ppc64.AFCTIW,
+ ppc64.AFCTIWZ,
+ ppc64.AFCTID,
+ ppc64.AFCTIDZ,
+ ppc64.AFCFID,
+ ppc64.AFCFIDCC,
+ ppc64.AFMOVS,
+ ppc64.AFMOVD,
+ ppc64.AFRSP,
+ ppc64.AFNEG,
+ ppc64.AFNEGCC:
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+
+ // Update only indirect uses of v in p->to
+ if !(copyas(&p.To, v) != 0) {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ }
+ return 0
+ }
+
+ if copyas(&p.To, v) != 0 {
+ // Fix up implicit from
+ if p.From.Type == obj.TYPE_NONE {
+ p.From = p.To
+ }
+ if copyau(&p.From, v) != 0 {
+ return 4
+ }
+ return 3
+ }
+
+ if copyau(&p.From, v) != 0 {
+ return 1
+ }
+ if copyau(&p.To, v) != 0 {
+ // p->to only indirectly uses v
+ return 1
+ }
+
+ return 0
+
+ case ppc64.AMOVBU, /* rar p->from, write p->to or read p->from, rar p->to */
+ ppc64.AMOVBZU,
+ ppc64.AMOVHU,
+ ppc64.AMOVHZU,
+ ppc64.AMOVWZU,
+ ppc64.AMOVDU:
+ if p.From.Type == obj.TYPE_MEM {
+ if copyas(&p.From, v) != 0 {
+ // No s!=nil check; need to fail
+ // anyway in that case
+ return 2
+ }
+
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyas(&p.To, v) != 0 {
+ return 3
+ }
+ } else if p.To.Type == obj.TYPE_MEM {
+ if copyas(&p.To, v) != 0 {
+ return 2
+ }
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.From, v) != 0 {
+ return 1
+ }
+ } else {
+ fmt.Printf("copyu: bad %v\n", p)
+ }
+
+ return 0
+
+ case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
+ ppc64.ARLWMICC:
+ if copyas(&p.To, v) != 0 {
+ return 2
+ }
+ fallthrough
+
+ /* fall through */
+ case ppc64.AADD,
+ /* read p->from, read p->reg, write p->to */
+ ppc64.AADDC,
+ ppc64.AADDE,
+ ppc64.ASUB,
+ ppc64.ASLW,
+ ppc64.ASRW,
+ ppc64.ASRAW,
+ ppc64.ASLD,
+ ppc64.ASRD,
+ ppc64.ASRAD,
+ ppc64.AOR,
+ ppc64.AORCC,
+ ppc64.AORN,
+ ppc64.AORNCC,
+ ppc64.AAND,
+ ppc64.AANDCC,
+ ppc64.AANDN,
+ ppc64.AANDNCC,
+ ppc64.ANAND,
+ ppc64.ANANDCC,
+ ppc64.ANOR,
+ ppc64.ANORCC,
+ ppc64.AXOR,
+ ppc64.AMULHW,
+ ppc64.AMULHWU,
+ ppc64.AMULLW,
+ ppc64.AMULLD,
+ ppc64.ADIVW,
+ ppc64.ADIVD,
+ ppc64.ADIVWU,
+ ppc64.ADIVDU,
+ ppc64.AREM,
+ ppc64.AREMU,
+ ppc64.AREMD,
+ ppc64.AREMDU,
+ ppc64.ARLWNM,
+ ppc64.ARLWNMCC,
+ ppc64.AFADDS,
+ ppc64.AFADD,
+ ppc64.AFSUBS,
+ ppc64.AFSUB,
+ ppc64.AFMULS,
+ ppc64.AFMUL,
+ ppc64.AFDIVS,
+ ppc64.AFDIV:
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ if copysub1(p, v, s, 1) != 0 {
+ return 1
+ }
+
+ // Update only indirect uses of v in p->to
+ if !(copyas(&p.To, v) != 0) {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ }
+ return 0
+ }
+
+ if copyas(&p.To, v) != 0 {
+ if p.Reg == 0 {
+ // Fix up implicit reg (e.g., ADD
+ // R3,R4 -> ADD R3,R4,R4) so we can
+ // update reg and to separately.
+ p.Reg = p.To.Reg
+ }
+
+ if copyau(&p.From, v) != 0 {
+ return 4
+ }
+ if copyau1(p, v) != 0 {
+ return 4
+ }
+ return 3
+ }
+
+ if copyau(&p.From, v) != 0 {
+ return 1
+ }
+ if copyau1(p, v) != 0 {
+ return 1
+ }
+ if copyau(&p.To, v) != 0 {
+ return 1
+ }
+ return 0
+
+ case ppc64.ABEQ,
+ ppc64.ABGT,
+ ppc64.ABGE,
+ ppc64.ABLT,
+ ppc64.ABLE,
+ ppc64.ABNE,
+ ppc64.ABVC,
+ ppc64.ABVS:
+ return 0
+
+ case obj.ACHECKNIL, /* read p->from */
+ ppc64.ACMP, /* read p->from, read p->to */
+ ppc64.ACMPU,
+ ppc64.ACMPW,
+ ppc64.ACMPWU,
+ ppc64.AFCMPO,
+ ppc64.AFCMPU:
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ return copysub(&p.To, v, s, 1)
+ }
+
+ if copyau(&p.From, v) != 0 {
+ return 1
+ }
+ if copyau(&p.To, v) != 0 {
+ return 1
+ }
+ return 0
+
+ // 9g never generates a branch to a GPR (this isn't
+ // even a normal instruction; liblink turns it into a
+ // mov and a branch).
+ case ppc64.ABR: /* read p->to */
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) != 0 {
+ return 1
+ }
+ return 0
+
+ case ppc64.ARETURN: /* funny */
+ if s != nil {
+ return 0
+ }
+
+ // All registers die at this point, so claim
+ // everything is set (and not used).
+ return 3
+
+ case ppc64.ABL: /* funny */
+ if v.Type == obj.TYPE_REG {
+ // TODO(rsc): REG_R0 and REG_F0 used to be
+ // (when register numbers started at 0) exregoffset and exfregoffset,
+ // which are unset entirely.
+ // It's strange that this handles R0 and F0 differently from the other
+ // registers. Possible failure to optimize?
+ if ppc64.REG_R0 < v.Reg && v.Reg <= ppc64.REGEXT {
+ return 2
+ }
+ if v.Reg == ppc64.REGARG {
+ return 2
+ }
+ if ppc64.REG_F0 < v.Reg && v.Reg <= ppc64.FREGEXT {
+ return 2
+ }
+ }
+
+ if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
+ return 2
+ }
+
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) != 0 {
+ return 4
+ }
+ return 3
+
+ // R0 is zero, used by DUFFZERO, cannot be substituted.
+ // R3 is ptr to memory, used and set, cannot be substituted.
+ case obj.ADUFFZERO:
+ if v.Type == obj.TYPE_REG {
+ if v.Reg == 0 {
+ return 1
+ }
+ if v.Reg == 3 {
+ return 2
+ }
+ }
+
+ return 0
+
+ // R3, R4 are ptr to src, dst, used and set, cannot be substituted.
+ // R5 is scratch, set by DUFFCOPY, cannot be substituted.
+ case obj.ADUFFCOPY:
+ if v.Type == obj.TYPE_REG {
+ if v.Reg == 3 || v.Reg == 4 {
+ return 2
+ }
+ if v.Reg == 5 {
+ return 3
+ }
+ }
+
+ return 0
+
+ case obj.ATEXT: /* funny */
+ if v.Type == obj.TYPE_REG {
+ if v.Reg == ppc64.REGARG {
+ return 3
+ }
+ }
+ return 0
+
+ case obj.APCDATA,
+ obj.AFUNCDATA,
+ obj.AVARDEF,
+ obj.AVARKILL:
+ return 0
+ }
+}
+
+// copyas returns 1 if a and v address the same register.
+//
+// If a is the from operand, this means this operation reads the
+// register in v. If a is the to operand, this means this operation
+// writes the register in v.
+func copyas(a *obj.Addr, v *obj.Addr) int {
+ if regtyp(v) != 0 {
+ if a.Type == v.Type {
+ if a.Reg == v.Reg {
+ return 1
+ }
+ }
+ }
+ return 0
+}
+
+// copyau returns 1 if a either directly or indirectly addresses the
+// same register as v.
+//
+// If a is the from operand, this means this operation reads the
+// register in v. If a is the to operand, this means the operation
+// either reads or writes the register in v (if !copyas(a, v), then
+// the operation reads the register in v).
+func copyau(a *obj.Addr, v *obj.Addr) int {
+ if copyas(a, v) != 0 {
+ return 1
+ }
+ if v.Type == obj.TYPE_REG {
+ if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
+ if v.Reg == a.Reg {
+ return 1
+ }
+ }
+ }
+ return 0
+}
+
+// copyau1 returns 1 if p->reg references the same register as v and v
+// is a direct reference.
+func copyau1(p *obj.Prog, v *obj.Addr) int {
+ if regtyp(v) != 0 && v.Reg != 0 {
+ if p.Reg == v.Reg {
+ return 1
+ }
+ }
+ return 0
+}
+
+// copysub replaces v with s in a if f!=0, or reports whether it could if f==0.
+// The return value is 1 on failure to substitute; on ppc64 substitution always
+// succeeds, so it always returns 0.
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+ if f != 0 {
+ if copyau(a, v) != 0 {
+ a.Reg = s.Reg
+ }
+ }
+ return 0
+}
+
+// copysub1 replaces v with s in p1->reg if f!=0, or reports whether it could
+// if f==0. The return value is 1 on failure to substitute; on ppc64
+// substitution always succeeds, so it always returns 0.
+func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
+ if f != 0 {
+ if copyau1(p1, v) != 0 {
+ p1.Reg = s.Reg
+ }
+ }
+ return 0
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) int {
+ if a.Type != v.Type {
+ return 0
+ }
+ if regtyp(v) != 0 && a.Reg == v.Reg {
+ return 1
+ }
+ if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
+ if v.Offset == a.Offset {
+ return 1
+ }
+ }
+ return 0
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) int {
+ return bool2int(reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096)
+}
+
+func stackaddr(a *obj.Addr) int {
+ return bool2int(a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP)
+}
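+
+// A sketch of the main rewrite above (assembly chosen for illustration):
+//
+// ADD R4, R5, R6
+// CMP R6, $0
+// BEQ out
+//
+// becomes
+//
+// ADDCC R4, R5, R6
+// BEQ out
+//
+// because the CC variant sets CR0 as a side effect, making the explicit
+// compare against zero redundant.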
diff --git a/src/cmd/new9g/prog.go b/src/cmd/new9g/prog.go
new file mode 100644
index 0000000000..e188f0dc65
--- /dev/null
+++ b/src/cmd/new9g/prog.go
@@ -0,0 +1,318 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
+
+const (
+ LeftRdwr uint32 = gc.LeftRead | gc.LeftWrite
+ RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about the instructions
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [ppc64.ALAST]gc.ProgInfo{
+ obj.ATYPE: gc.ProgInfo{gc.Pseudo | gc.Skip, 0, 0, 0},
+ obj.ATEXT: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AFUNCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.APCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AUNDEF: gc.ProgInfo{gc.Break, 0, 0, 0},
+ obj.AUSEFIELD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ obj.ACHECKNIL: gc.ProgInfo{gc.LeftRead, 0, 0, 0},
+ obj.AVARDEF: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+ obj.AVARKILL: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+ // NOP is an internal no-op that also stands
+ // for USED and SET annotations, not the Power opcode.
+ obj.ANOP: gc.ProgInfo{gc.LeftRead | gc.RightWrite, 0, 0, 0},
+
+ // Integer
+ ppc64.AADD: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.ASUB: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.ANEG: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AAND: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AOR: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AXOR: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AMULLD: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AMULLW: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AMULHD: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AMULHDU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.ADIVD: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.ADIVDU: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.ASLD: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.ASRD: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.ASRAD: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.ACMP: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ ppc64.ACMPU: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ ppc64.ATD: gc.ProgInfo{gc.SizeQ | gc.RightRead, 0, 0, 0},
+
+ // Floating point.
+ ppc64.AFADD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AFADDS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AFSUB: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AFSUBS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AFMUL: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AFMULS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AFDIV: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AFDIVS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AFCTIDZ: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AFCFID: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ ppc64.AFCMPU: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ ppc64.AFRSP: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+ // Moves
+ ppc64.AMOVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+ ppc64.AMOVBU: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc, 0, 0, 0},
+ ppc64.AMOVBZ: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+ ppc64.AMOVH: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+ ppc64.AMOVHU: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc, 0, 0, 0},
+ ppc64.AMOVHZ: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+ ppc64.AMOVW: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+
+ // there is no AMOVWU.
+ ppc64.AMOVWZU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc, 0, 0, 0},
+ ppc64.AMOVWZ: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+ ppc64.AMOVD: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ ppc64.AMOVDU: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move | gc.PostInc, 0, 0, 0},
+ ppc64.AFMOVS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+ ppc64.AFMOVD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+ // Jumps
+ ppc64.ABR: gc.ProgInfo{gc.Jump | gc.Break, 0, 0, 0},
+ ppc64.ABL: gc.ProgInfo{gc.Call, 0, 0, 0},
+ ppc64.ABEQ: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ ppc64.ABNE: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ ppc64.ABGE: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ ppc64.ABLT: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ ppc64.ABGT: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ ppc64.ABLE: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ ppc64.ARETURN: gc.ProgInfo{gc.Break, 0, 0, 0},
+ obj.ADUFFZERO: gc.ProgInfo{gc.Call, 0, 0, 0},
+ obj.ADUFFCOPY: gc.ProgInfo{gc.Call, 0, 0, 0},
+}
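+
+// Reading an entry (an illustrative gloss): the ppc64.AADD row above,
+// SizeQ | LeftRead | RegRead | RightWrite, describes a quad-width (8-byte)
+// instruction that reads its From and Reg operands and writes its To
+// operand; proginfo below adjusts the flags when the Reg slot is unused.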
+
+var initproginfo_initialized int
+
+func initproginfo() {
+ var addvariant = []int{V_CC, V_V, V_CC | V_V}
+ var as int
+ var as2 int
+ var i int
+ var variant int
+
+ if initproginfo_initialized != 0 {
+ return
+ }
+ initproginfo_initialized = 1
+
+ // Perform one-time expansion of instructions in progtable to
+ // their CC, V, and VCC variants
+ for as = 0; as < len(progtable); as++ {
+ if progtable[as].Flags == 0 {
+ continue
+ }
+ variant = as2variant(as)
+ for i = 0; i < len(addvariant); i++ {
+ as2 = variant2as(as, variant|addvariant[i])
+ if as2 != 0 && progtable[as2].Flags == 0 {
+ progtable[as2] = progtable[as]
+ }
+ }
+ }
+}
+
+func proginfo(info *gc.ProgInfo, p *obj.Prog) {
+ initproginfo()
+
+ *info = progtable[p.As]
+ if info.Flags == 0 {
+ *info = progtable[ppc64.AADD]
+ gc.Fatal("proginfo: unknown instruction %v", p)
+ }
+
+ if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
+ info.Flags &^= gc.RegRead
+ info.Flags |= gc.RightRead /*CanRegRead |*/
+ }
+
+ if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
+ info.Regindex |= RtoB(int(p.From.Reg))
+ if info.Flags&gc.PostInc != 0 {
+ info.Regset |= RtoB(int(p.From.Reg))
+ }
+ }
+
+ if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
+ info.Regindex |= RtoB(int(p.To.Reg))
+ if info.Flags&gc.PostInc != 0 {
+ info.Regset |= RtoB(int(p.To.Reg))
+ }
+ }
+
+ if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
+ info.Flags &^= gc.LeftRead
+ info.Flags |= gc.LeftAddr
+ }
+
+ if p.As == obj.ADUFFZERO {
+ info.Reguse |= 1<<0 | RtoB(ppc64.REG_R3)
+ info.Regset |= RtoB(ppc64.REG_R3)
+ }
+
+ if p.As == obj.ADUFFCOPY {
+ // TODO(austin) Revisit when duffcopy is implemented
+ info.Reguse |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) | RtoB(ppc64.REG_R5)
+
+ info.Regset |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4)
+ }
+}
+
+// Instruction variants table. Initially this contains entries only
+// for the "base" form of each instruction. On the first call to
+// as2variant or variant2as, we'll add the variants to the table.
+var varianttable = [ppc64.ALAST][4]int{
+ ppc64.AADD: [4]int{ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
+ ppc64.AADDC: [4]int{ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
+ ppc64.AADDE: [4]int{ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
+ ppc64.AADDME: [4]int{ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
+ ppc64.AADDZE: [4]int{ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
+ ppc64.AAND: [4]int{ppc64.AAND, ppc64.AANDCC, 0, 0},
+ ppc64.AANDN: [4]int{ppc64.AANDN, ppc64.AANDNCC, 0, 0},
+ ppc64.ACNTLZD: [4]int{ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
+ ppc64.ACNTLZW: [4]int{ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
+ ppc64.ADIVD: [4]int{ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
+ ppc64.ADIVDU: [4]int{ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
+ ppc64.ADIVW: [4]int{ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
+ ppc64.ADIVWU: [4]int{ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
+ ppc64.AEQV: [4]int{ppc64.AEQV, ppc64.AEQVCC, 0, 0},
+ ppc64.AEXTSB: [4]int{ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
+ ppc64.AEXTSH: [4]int{ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
+ ppc64.AEXTSW: [4]int{ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
+ ppc64.AFABS: [4]int{ppc64.AFABS, ppc64.AFABSCC, 0, 0},
+ ppc64.AFADD: [4]int{ppc64.AFADD, ppc64.AFADDCC, 0, 0},
+ ppc64.AFADDS: [4]int{ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
+ ppc64.AFCFID: [4]int{ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
+ ppc64.AFCTID: [4]int{ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
+ ppc64.AFCTIDZ: [4]int{ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
+ ppc64.AFCTIW: [4]int{ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
+ ppc64.AFCTIWZ: [4]int{ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
+ ppc64.AFDIV: [4]int{ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
+ ppc64.AFDIVS: [4]int{ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
+ ppc64.AFMADD: [4]int{ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
+ ppc64.AFMADDS: [4]int{ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
+ ppc64.AFMOVD: [4]int{ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
+ ppc64.AFMSUB: [4]int{ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
+ ppc64.AFMSUBS: [4]int{ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
+ ppc64.AFMUL: [4]int{ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
+ ppc64.AFMULS: [4]int{ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
+ ppc64.AFNABS: [4]int{ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
+ ppc64.AFNEG: [4]int{ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
+ ppc64.AFNMADD: [4]int{ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
+ ppc64.AFNMADDS: [4]int{ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
+ ppc64.AFNMSUB: [4]int{ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
+ ppc64.AFNMSUBS: [4]int{ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
+ ppc64.AFRES: [4]int{ppc64.AFRES, ppc64.AFRESCC, 0, 0},
+ ppc64.AFRSP: [4]int{ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
+ ppc64.AFRSQRTE: [4]int{ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
+ ppc64.AFSEL: [4]int{ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
+ ppc64.AFSQRT: [4]int{ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
+ ppc64.AFSQRTS: [4]int{ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
+ ppc64.AFSUB: [4]int{ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
+ ppc64.AFSUBS: [4]int{ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
+ ppc64.AMTFSB0: [4]int{ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
+ ppc64.AMTFSB1: [4]int{ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
+ ppc64.AMULHD: [4]int{ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
+ ppc64.AMULHDU: [4]int{ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
+ ppc64.AMULHW: [4]int{ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
+ ppc64.AMULHWU: [4]int{ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
+ ppc64.AMULLD: [4]int{ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
+ ppc64.AMULLW: [4]int{ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
+ ppc64.ANAND: [4]int{ppc64.ANAND, ppc64.ANANDCC, 0, 0},
+ ppc64.ANEG: [4]int{ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
+ ppc64.ANOR: [4]int{ppc64.ANOR, ppc64.ANORCC, 0, 0},
+ ppc64.AOR: [4]int{ppc64.AOR, ppc64.AORCC, 0, 0},
+ ppc64.AORN: [4]int{ppc64.AORN, ppc64.AORNCC, 0, 0},
+ ppc64.AREM: [4]int{ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
+ ppc64.AREMD: [4]int{ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
+ ppc64.AREMDU: [4]int{ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
+ ppc64.AREMU: [4]int{ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
+ ppc64.ARLDC: [4]int{ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
+ ppc64.ARLDCL: [4]int{ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
+ ppc64.ARLDCR: [4]int{ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
+ ppc64.ARLDMI: [4]int{ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
+ ppc64.ARLWMI: [4]int{ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
+ ppc64.ARLWNM: [4]int{ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
+ ppc64.ASLD: [4]int{ppc64.ASLD, ppc64.ASLDCC, 0, 0},
+ ppc64.ASLW: [4]int{ppc64.ASLW, ppc64.ASLWCC, 0, 0},
+ ppc64.ASRAD: [4]int{ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
+ ppc64.ASRAW: [4]int{ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
+ ppc64.ASRD: [4]int{ppc64.ASRD, ppc64.ASRDCC, 0, 0},
+ ppc64.ASRW: [4]int{ppc64.ASRW, ppc64.ASRWCC, 0, 0},
+ ppc64.ASUB: [4]int{ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
+ ppc64.ASUBC: [4]int{ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
+ ppc64.ASUBE: [4]int{ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
+ ppc64.ASUBME: [4]int{ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
+ ppc64.ASUBZE: [4]int{ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
+ ppc64.AXOR: [4]int{ppc64.AXOR, ppc64.AXORCC, 0, 0},
+}
+
+var initvariants_initialized int
+
+func initvariants() {
+ var i int
+ var j int
+
+ if initvariants_initialized != 0 {
+ return
+ }
+ initvariants_initialized = 1
+
+ for i = 0; i < len(varianttable); i++ {
+ if varianttable[i][0] == 0 {
+ // Instruction has no variants
+ varianttable[i][0] = i
+
+ continue
+ }
+
+ // Copy base form to other variants
+ if varianttable[i][0] == i {
+ for j = 0; j < len(varianttable[i]); j++ {
+ varianttable[varianttable[i][j]] = varianttable[i]
+ }
+ }
+ }
+}
+
+// as2variant returns the variant (V_*) flags of instruction as.
+func as2variant(as int) int {
+ var i int
+ initvariants()
+ for i = 0; i < len(varianttable[as]); i++ {
+ if varianttable[as][i] == as {
+ return i
+ }
+ }
+ gc.Fatal("as2variant: instruction %v is not a variant of itself", ppc64.Aconv(as))
+ return 0
+}
+
+// variant2as returns the instruction as with the given variant (V_*) flags.
+// If no such variant exists, this returns 0.
+func variant2as(as int, flags int) int {
+ initvariants()
+ return varianttable[as][flags]
+}
diff --git a/src/cmd/new9g/reg.go b/src/cmd/new9g/reg.go
new file mode 100644
index 0000000000..faed60d0ee
--- /dev/null
+++ b/src/cmd/new9g/reg.go
@@ -0,0 +1,164 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import "cmd/internal/obj/ppc64"
+import "cmd/internal/gc"
+
+const (
+ NREGVAR = 64
+)
+
+var regname = []string{
+ ".R0",
+ ".R1",
+ ".R2",
+ ".R3",
+ ".R4",
+ ".R5",
+ ".R6",
+ ".R7",
+ ".R8",
+ ".R9",
+ ".R10",
+ ".R11",
+ ".R12",
+ ".R13",
+ ".R14",
+ ".R15",
+ ".R16",
+ ".R17",
+ ".R18",
+ ".R19",
+ ".R20",
+ ".R21",
+ ".R22",
+ ".R23",
+ ".R24",
+ ".R25",
+ ".R26",
+ ".R27",
+ ".R28",
+ ".R29",
+ ".R30",
+ ".R31",
+ ".F0",
+ ".F1",
+ ".F2",
+ ".F3",
+ ".F4",
+ ".F5",
+ ".F6",
+ ".F7",
+ ".F8",
+ ".F9",
+ ".F10",
+ ".F11",
+ ".F12",
+ ".F13",
+ ".F14",
+ ".F15",
+ ".F16",
+ ".F17",
+ ".F18",
+ ".F19",
+ ".F20",
+ ".F21",
+ ".F22",
+ ".F23",
+ ".F24",
+ ".F25",
+ ".F26",
+ ".F27",
+ ".F28",
+ ".F29",
+ ".F30",
+ ".F31",
+}
+
+func regnames(n *int) []string {
+ *n = NREGVAR
+ return regname
+}
+
+func excludedregs() uint64 {
+ var regbits uint64
+
+ // Exclude registers with fixed functions
+ regbits = 1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS)
+
+ // Also exclude floating point registers with fixed constants
+ regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
+
+ return regbits
+}
+
+func doregbits(r int) uint64 {
+ return 0
+}
+
+/*
+ * track register variables including external registers:
+ * bit reg
+ * 0 R0
+ * 1 R1
+ * ... ...
+ * 31 R31
+ * 32+0 F0
+ * 32+1 F1
+ * ... ...
+ * 32+31 F31
+ */
+func RtoB(r int) uint64 {
+ if r > ppc64.REG_R0 && r <= ppc64.REG_R31 {
+ return 1 << uint(r-ppc64.REG_R0)
+ }
+ if r >= ppc64.REG_F0 && r <= ppc64.REG_F31 {
+ return 1 << uint(32+r-ppc64.REG_F0)
+ }
+ return 0
+}
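+
+// For example (a sketch): RtoB(ppc64.REG_R3) == 1<<3 and
+// RtoB(ppc64.REG_F1) == 1<<33; BtoR and BtoF below invert the mapping for
+// the low and high 32-bit halves. Note that RtoB returns 0 for R0 itself,
+// keeping the always-zero register out of the allocatable set.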
+
+func BtoR(b uint64) int {
+ b &= 0xffffffff
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + ppc64.REG_R0
+}
+
+func BtoF(b uint64) int {
+ b >>= 32
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + ppc64.REG_F0
+}
diff --git a/src/cmd/new9g/util.go b/src/cmd/new9g/util.go
new file mode 100644
index 0000000000..bb5eedb15a
--- /dev/null
+++ b/src/cmd/new9g/util.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func bool2int(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}