Diffstat (limited to 'src/cmd/compile/internal/gc/pgen.go')
-rw-r--r--	src/cmd/compile/internal/gc/pgen.go	| 425
1 file changed, 227 insertions, 198 deletions
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 353f4b08c9..221b733a07 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -5,6 +5,8 @@
 package gc
 
 import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/types"
 	"cmd/internal/dwarf"
@@ -22,35 +24,34 @@ import (
 // "Portable" code generation.
 
 var (
-	nBackendWorkers int     // number of concurrent backend workers, set by a compiler flag
-	compilequeue    []*Node // functions waiting to be compiled
+	compilequeue []ir.Node // functions waiting to be compiled
 )
 
-func emitptrargsmap(fn *Node) {
-	if fn.funcname() == "_" || fn.Func.Nname.Sym.Linkname != "" {
+func emitptrargsmap(fn ir.Node) {
+	if ir.FuncName(fn) == "_" || fn.Func().Nname.Sym().Linkname != "" {
 		return
 	}
-	lsym := Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap")
+	lsym := base.Ctxt.Lookup(fn.Func().LSym.Name + ".args_stackmap")
 
-	nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
+	nptr := int(fn.Type().ArgWidth() / int64(Widthptr))
 	bv := bvalloc(int32(nptr) * 2)
 	nbitmap := 1
-	if fn.Type.NumResults() > 0 {
+	if fn.Type().NumResults() > 0 {
 		nbitmap = 2
 	}
 	off := duint32(lsym, 0, uint32(nbitmap))
 	off = duint32(lsym, off, uint32(bv.n))
 
-	if fn.IsMethod() {
-		onebitwalktype1(fn.Type.Recvs(), 0, bv)
+	if ir.IsMethod(fn) {
+		onebitwalktype1(fn.Type().Recvs(), 0, bv)
 	}
-	if fn.Type.NumParams() > 0 {
-		onebitwalktype1(fn.Type.Params(), 0, bv)
+	if fn.Type().NumParams() > 0 {
+		onebitwalktype1(fn.Type().Params(), 0, bv)
 	}
 	off = dbvec(lsym, off, bv)
 
-	if fn.Type.NumResults() > 0 {
-		onebitwalktype1(fn.Type.Results(), 0, bv)
+	if fn.Type().NumResults() > 0 {
+		onebitwalktype1(fn.Type().Results(), 0, bv)
 		off = dbvec(lsym, off, bv)
 	}
 
@@ -67,40 +68,40 @@ func emitptrargsmap(fn *Node) {
 // really means, in memory, things with pointers needing zeroing at
 // the top of the stack and increasing in size.
 // Non-autos sort on offset.
-func cmpstackvarlt(a, b *Node) bool {
-	if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
-		return b.Class() == PAUTO
+func cmpstackvarlt(a, b ir.Node) bool {
+	if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) {
+		return b.Class() == ir.PAUTO
 	}
 
-	if a.Class() != PAUTO {
-		return a.Xoffset < b.Xoffset
+	if a.Class() != ir.PAUTO {
+		return a.Offset() < b.Offset()
 	}
 
-	if a.Name.Used() != b.Name.Used() {
-		return a.Name.Used()
+	if a.Name().Used() != b.Name().Used() {
+		return a.Name().Used()
 	}
 
-	ap := a.Type.HasPointers()
-	bp := b.Type.HasPointers()
+	ap := a.Type().HasPointers()
+	bp := b.Type().HasPointers()
 	if ap != bp {
 		return ap
 	}
 
-	ap = a.Name.Needzero()
-	bp = b.Name.Needzero()
+	ap = a.Name().Needzero()
+	bp = b.Name().Needzero()
 	if ap != bp {
 		return ap
 	}
 
-	if a.Type.Width != b.Type.Width {
-		return a.Type.Width > b.Type.Width
+	if a.Type().Width != b.Type().Width {
+		return a.Type().Width > b.Type().Width
 	}
 
-	return a.Sym.Name < b.Sym.Name
+	return a.Sym().Name < b.Sym().Name
 }
 
 // byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
-type byStackVar []*Node
+type byStackVar []ir.Node
 
 func (s byStackVar) Len() int           { return len(s) }
 func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
@@ -109,33 +110,33 @@ func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
 func (s *ssafn) AllocFrame(f *ssa.Func) {
 	s.stksize = 0
 	s.stkptrsize = 0
-	fn := s.curfn.Func
+	fn := s.curfn.Func()
 
 	// Mark the PAUTO's unused.
 	for _, ln := range fn.Dcl {
-		if ln.Class() == PAUTO {
-			ln.Name.SetUsed(false)
+		if ln.Class() == ir.PAUTO {
+			ln.Name().SetUsed(false)
 		}
 	}
 
 	for _, l := range f.RegAlloc {
 		if ls, ok := l.(ssa.LocalSlot); ok {
-			ls.N.(*Node).Name.SetUsed(true)
+			ls.N.Name().SetUsed(true)
 		}
 	}
 
 	scratchUsed := false
 	for _, b := range f.Blocks {
 		for _, v := range b.Values {
-			if n, ok := v.Aux.(*Node); ok {
+			if n, ok := v.Aux.(ir.Node); ok {
 				switch n.Class() {
-				case PPARAM, PPARAMOUT:
+				case ir.PPARAM, ir.PPARAMOUT:
 					// Don't modify nodfp; it is a global.
 					if n != nodfp {
-						n.Name.SetUsed(true)
+						n.Name().SetUsed(true)
 					}
-				case PAUTO:
-					n.Name.SetUsed(true)
+				case ir.PAUTO:
+					n.Name().SetUsed(true)
 				}
 			}
 			if !scratchUsed {
@@ -146,7 +147,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
 	}
 
 	if f.Config.NeedsFpScratch && scratchUsed {
-		s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
+		s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64])
 	}
 
 	sort.Sort(byStackVar(fn.Dcl))
@@ -154,18 +155,18 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
 	// Reassign stack offsets of the locals that are used.
 	lastHasPtr := false
 	for i, n := range fn.Dcl {
-		if n.Op != ONAME || n.Class() != PAUTO {
+		if n.Op() != ir.ONAME || n.Class() != ir.PAUTO {
 			continue
 		}
-		if !n.Name.Used() {
+		if !n.Name().Used() {
 			fn.Dcl = fn.Dcl[:i]
 			break
 		}
 
-		dowidth(n.Type)
-		w := n.Type.Width
+		dowidth(n.Type())
+		w := n.Type().Width
 		if w >= thearch.MAXWIDTH || w < 0 {
-			Fatalf("bad width")
+			base.Fatalf("bad width")
 		}
 		if w == 0 && lastHasPtr {
 			// Pad between a pointer-containing object and a zero-sized object.
@@ -175,8 +176,8 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
 			w = 1
 		}
 		s.stksize += w
-		s.stksize = Rnd(s.stksize, int64(n.Type.Align))
-		if n.Type.HasPointers() {
+		s.stksize = Rnd(s.stksize, int64(n.Type().Align))
+		if n.Type().HasPointers() {
 			s.stkptrsize = s.stksize
 			lastHasPtr = true
 		} else {
@@ -185,59 +186,58 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
 		if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
 			s.stksize = Rnd(s.stksize, int64(Widthptr))
 		}
-		n.Xoffset = -s.stksize
+		n.SetOffset(-s.stksize)
 	}
 
 	s.stksize = Rnd(s.stksize, int64(Widthreg))
 	s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
 }
 
-func funccompile(fn *Node) {
+func funccompile(fn ir.Node) {
 	if Curfn != nil {
-		Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
+		base.Fatalf("funccompile %v inside %v", fn.Func().Nname.Sym(), Curfn.Func().Nname.Sym())
 	}
 
-	if fn.Type == nil {
-		if nerrors == 0 {
-			Fatalf("funccompile missing type")
+	if fn.Type() == nil {
+		if base.Errors() == 0 {
+			base.Fatalf("funccompile missing type")
 		}
 		return
 	}
 
 	// assign parameter offsets
-	dowidth(fn.Type)
+	dowidth(fn.Type())
 
-	if fn.Nbody.Len() == 0 {
+	if fn.Body().Len() == 0 {
 		// Initialize ABI wrappers if necessary.
-		fn.Func.initLSym(false)
+		initLSym(fn.Func(), false)
 		emitptrargsmap(fn)
 		return
 	}
 
-	dclcontext = PAUTO
+	dclcontext = ir.PAUTO
 	Curfn = fn
 	compile(fn)
 	Curfn = nil
-	dclcontext = PEXTERN
+	dclcontext = ir.PEXTERN
 }
 
-func compile(fn *Node) {
-	saveerrors()
-
+func compile(fn ir.Node) {
+	errorsBefore := base.Errors()
 	order(fn)
-	if nerrors != 0 {
+	if base.Errors() > errorsBefore {
 		return
 	}
 
 	// Set up the function's LSym early to avoid data races with the assemblers.
 	// Do this before walk, as walk needs the LSym to set attributes/relocations
 	// (e.g. in markTypeUsedInInterface).
-	fn.Func.initLSym(true)
+	initLSym(fn.Func(), true)
 
 	walk(fn)
-	if nerrors != 0 {
+	if base.Errors() > errorsBefore {
 		return
 	}
 	if instrumenting {
@@ -247,7 +247,7 @@ func compile(fn *Node) {
 	// From this point, there should be no uses of Curfn. Enforce that.
 	Curfn = nil
 
-	if fn.funcname() == "_" {
+	if ir.FuncName(fn) == "_" {
 		// We don't need to generate code for this function, just report errors in its body.
 		// At this point we've generated any errors needed.
 		// (Beyond here we generate only non-spec errors, like "stack frame too large".)
@@ -259,15 +259,15 @@ func compile(fn *Node) {
 	// be types of stack objects. We need to do this here
 	// because symbols must be allocated before the parallel
 	// phase of the compiler.
-	for _, n := range fn.Func.Dcl {
+	for _, n := range fn.Func().Dcl {
 		switch n.Class() {
-		case PPARAM, PPARAMOUT, PAUTO:
-			if livenessShouldTrack(n) && n.Name.Addrtaken() {
-				dtypesym(n.Type)
+		case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
+			if livenessShouldTrack(n) && n.Name().Addrtaken() {
+				dtypesym(n.Type())
 				// Also make sure we allocate a linker symbol
 				// for the stack object data, for the same reason.
-				if fn.Func.lsym.Func().StackObjects == nil {
-					fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
+				if fn.Func().LSym.Func().StackObjects == nil {
+					fn.Func().LSym.Func().StackObjects = base.Ctxt.Lookup(fn.Func().LSym.Name + ".stkobj")
 				}
 			}
 		}
@@ -284,29 +284,29 @@ func compile(fn *Node) {
 // If functions are not compiled immediately,
 // they are enqueued in compilequeue,
 // which is drained by compileFunctions.
-func compilenow(fn *Node) bool {
+func compilenow(fn ir.Node) bool {
 	// Issue 38068: if this function is a method AND an inline
 	// candidate AND was not inlined (yet), put it onto the compile
 	// queue instead of compiling it immediately. This is in case we
 	// wind up inlining it into a method wrapper that is generated by
 	// compiling a function later on in the xtop list.
-	if fn.IsMethod() && isInlinableButNotInlined(fn) {
+	if ir.IsMethod(fn) && isInlinableButNotInlined(fn) {
 		return false
 	}
-	return nBackendWorkers == 1 && Debug_compilelater == 0
+	return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0
 }
 
 // isInlinableButNotInlined returns true if 'fn' was marked as an
 // inline candidate but then never inlined (presumably because we
 // found no call sites).
-func isInlinableButNotInlined(fn *Node) bool {
-	if fn.Func.Nname.Func.Inl == nil {
+func isInlinableButNotInlined(fn ir.Node) bool {
+	if fn.Func().Nname.Func().Inl == nil {
 		return false
 	}
-	if fn.Sym == nil {
+	if fn.Sym() == nil {
 		return true
 	}
-	return !fn.Sym.Linksym().WasInlined()
+	return !fn.Sym().Linksym().WasInlined()
 }
 
 const maxStackSize = 1 << 30
@@ -315,12 +315,12 @@ const maxStackSize = 1 << 30
 // uses it to generate a plist,
 // and flushes that plist to machine code.
 // worker indicates which of the backend workers is doing the processing.
-func compileSSA(fn *Node, worker int) {
+func compileSSA(fn ir.Node, worker int) {
 	f := buildssa(fn, worker)
 	// Note: check arg size to fix issue 25507.
-	if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
+	if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
 		largeStackFramesMu.Lock()
-		largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
+		largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
 		largeStackFramesMu.Unlock()
 		return
 	}
@@ -336,14 +336,14 @@ func compileSSA(fn *Node, worker int) {
 	if pp.Text.To.Offset >= maxStackSize {
 		largeStackFramesMu.Lock()
 		locals := f.Frontend().(*ssafn).stksize
-		largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
+		largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
 		largeStackFramesMu.Unlock()
 		return
 	}
 	pp.Flush() // assemble, fill in boilerplate, etc.
 	// fieldtrack must be called after pp.Flush. See issue 20014.
-	fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
+	fieldtrack(pp.Text.From.Sym, fn.Func().FieldTrack)
 }
 
 func init() {
@@ -360,7 +360,7 @@ func compileFunctions() {
 		sizeCalculationDisabled = true // not safe to calculate sizes concurrently
 		if race.Enabled {
 			// Randomize compilation order to try to shake out races.
-			tmp := make([]*Node, len(compilequeue))
+			tmp := make([]ir.Node, len(compilequeue))
 			perm := rand.Perm(len(compilequeue))
 			for i, v := range perm {
 				tmp[v] = compilequeue[i]
@@ -371,13 +371,13 @@ func compileFunctions() {
 		// since they're most likely to be the slowest.
 		// This helps avoid stragglers.
 		sort.Slice(compilequeue, func(i, j int) bool {
-			return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
+			return compilequeue[i].Body().Len() > compilequeue[j].Body().Len()
 		})
 	}
 	var wg sync.WaitGroup
-	Ctxt.InParallel = true
-	c := make(chan *Node, nBackendWorkers)
-	for i := 0; i < nBackendWorkers; i++ {
+	base.Ctxt.InParallel = true
+	c := make(chan ir.Node, base.Flag.LowerC)
+	for i := 0; i < base.Flag.LowerC; i++ {
 		wg.Add(1)
 		go func(worker int) {
 			for fn := range c {
@@ -392,46 +392,75 @@ func compileFunctions() {
 		close(c)
 		compilequeue = nil
 		wg.Wait()
-		Ctxt.InParallel = false
+		base.Ctxt.InParallel = false
 		sizeCalculationDisabled = false
 	}
 }
 
 func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
-	fn := curfn.(*Node)
-	if fn.Func.Nname != nil {
-		if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
-			Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
-		}
-	}
-
-	var apdecls []*Node
+	fn := curfn.(ir.Node)
+	if fn.Func().Nname != nil {
+		if expect := fn.Func().Nname.Sym().Linksym(); fnsym != expect {
+			base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
+		}
+	}
+
+	// Back when there were two different *Funcs for a function, this code
+	// was not consistent about whether a particular *Node being processed
+	// was an ODCLFUNC or ONAME node. Partly this is because inlined function
+	// bodies have no ODCLFUNC node, which was its own inconsistency.
+	// In any event, the handling of the two different nodes for DWARF purposes
+	// was subtly different, likely in unintended ways. CL 272253 merged the
+	// two nodes' Func fields, so that code sees the same *Func whether it is
+	// holding the ODCLFUNC or the ONAME. This resulted in changes in the
+	// DWARF output. To preserve the existing DWARF output and leave an
+	// intentional change for a future CL, this code does the following when
+	// fn.Op == ONAME:
+	//
+	// 1. Disallow use of createComplexVars in createDwarfVars.
+	//    It was not possible to reach that code for an ONAME before,
+	//    because the DebugInfo was set only on the ODCLFUNC Func.
+	//    Calling into it in the ONAME case causes an index out of bounds panic.
+	//
+	// 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func,
+	//    not the ONAME Func. Populating apdecls for the ONAME case results
+	//    in selected being populated after createSimpleVars is called in
+	//    createDwarfVars, and then that causes the loop to skip all the entries
+	//    in dcl, meaning that the RecordAutoType calls don't happen.
+	//
+	// These two adjustments keep toolstash -cmp working for now.
+	// Deciding the right answer is, as they say, future work.
+	isODCLFUNC := fn.Op() == ir.ODCLFUNC
+
+	var apdecls []ir.Node
 	// Populate decls for fn.
-	for _, n := range fn.Func.Dcl {
-		if n.Op != ONAME { // might be OTYPE or OLITERAL
-			continue
-		}
-		switch n.Class() {
-		case PAUTO:
-			if !n.Name.Used() {
-				// Text == nil -> generating abstract function
-				if fnsym.Func().Text != nil {
-					Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+	if isODCLFUNC {
+		for _, n := range fn.Func().Dcl {
+			if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
+				continue
+			}
+			switch n.Class() {
+			case ir.PAUTO:
+				if !n.Name().Used() {
+					// Text == nil -> generating abstract function
+					if fnsym.Func().Text != nil {
+						base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+					}
+					continue
 				}
+			case ir.PPARAM, ir.PPARAMOUT:
+			default:
 				continue
 			}
-		case PPARAM, PPARAMOUT:
-		default:
-			continue
+			apdecls = append(apdecls, n)
+			fnsym.Func().RecordAutoType(ngotype(n).Linksym())
 		}
-		apdecls = append(apdecls, n)
-		fnsym.Func().RecordAutoType(ngotype(n).Linksym())
 	}
 
-	decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
+	decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn.Func(), apdecls)
 
 	// For each type referenced by the function's auto vars but not
-	// already referenced by a dwarf var, attach a dummy relocation to
+	// already referenced by a dwarf var, attach an R_USETYPE relocation to
 	// the function symbol to ensure that the type is included in DWARF
 	// processing during linking.
 	typesyms := []*obj.LSym{}
 
@@ -446,22 +475,22 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
 	}
 	fnsym.Func().Autot = nil
 
-	var varScopes []ScopeID
+	var varScopes []ir.ScopeID
 	for _, decl := range decls {
 		pos := declPos(decl)
-		varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
+		varScopes = append(varScopes, findScope(fn.Func().Marks, pos))
 	}
 
 	scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
 	var inlcalls dwarf.InlCalls
-	if genDwarfInline > 0 {
+	if base.Flag.GenDwarfInl > 0 {
 		inlcalls = assembleInlines(fnsym, dwarfVars)
 	}
 	return scopes, inlcalls
 }
 
-func declPos(decl *Node) src.XPos {
-	if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
+func declPos(decl ir.Node) src.XPos {
+	if decl.Name().Defn != nil && (decl.Name().Captured() || decl.Name().Byval()) {
 		// It's not clear which position is correct for captured variables here:
 		// * decl.Pos is the wrong position for captured variables, in the inner
 		//   function, but it is the right position in the outer function.
@@ -476,19 +505,19 @@ func declPos(decl *Node) src.XPos {
 		// case statement.
 		// This code is probably wrong for type switch variables that are also
 		// captured.
-		return decl.Name.Defn.Pos
+		return decl.Name().Defn.Pos()
 	}
-	return decl.Pos
+	return decl.Pos()
 }
 
 // createSimpleVars creates a DWARF entry for every variable declared in the
 // function, claiming that they are permanently on the stack.
-func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
+func createSimpleVars(fnsym *obj.LSym, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) {
 	var vars []*dwarf.Var
-	var decls []*Node
-	selected := make(map[*Node]bool)
+	var decls []ir.Node
+	selected := make(map[ir.Node]bool)
 	for _, n := range apDecls {
-		if n.IsAutoTmp() {
+		if ir.IsAutoTmp(n) {
 			continue
 		}
 
@@ -499,14 +528,14 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var,
 	return decls, vars, selected
 }
 
-func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
+func createSimpleVar(fnsym *obj.LSym, n ir.Node) *dwarf.Var {
 	var abbrev int
-	offs := n.Xoffset
+	offs := n.Offset()
 
 	switch n.Class() {
-	case PAUTO:
+	case ir.PAUTO:
 		abbrev = dwarf.DW_ABRV_AUTO
-		if Ctxt.FixedFrameSize() == 0 {
+		if base.Ctxt.FixedFrameSize() == 0 {
 			offs -= int64(Widthptr)
 		}
 		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
@@ -514,32 +543,32 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
 			offs -= int64(Widthptr)
 		}
 
-	case PPARAM, PPARAMOUT:
+	case ir.PPARAM, ir.PPARAMOUT:
 		abbrev = dwarf.DW_ABRV_PARAM
-		offs += Ctxt.FixedFrameSize()
+		offs += base.Ctxt.FixedFrameSize()
 	default:
-		Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
+		base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
 	}
 
-	typename := dwarf.InfoPrefix + typesymname(n.Type)
+	typename := dwarf.InfoPrefix + typesymname(n.Type())
 	delete(fnsym.Func().Autot, ngotype(n).Linksym())
 	inlIndex := 0
-	if genDwarfInline > 1 {
-		if n.Name.InlFormal() || n.Name.InlLocal() {
-			inlIndex = posInlIndex(n.Pos) + 1
-			if n.Name.InlFormal() {
+	if base.Flag.GenDwarfInl > 1 {
+		if n.Name().InlFormal() || n.Name().InlLocal() {
+			inlIndex = posInlIndex(n.Pos()) + 1
+			if n.Name().InlFormal() {
 				abbrev = dwarf.DW_ABRV_PARAM
 			}
 		}
 	}
-	declpos := Ctxt.InnermostPos(declPos(n))
+	declpos := base.Ctxt.InnermostPos(declPos(n))
 	return &dwarf.Var{
-		Name:          n.Sym.Name,
-		IsReturnValue: n.Class() == PPARAMOUT,
-		IsInlFormal:   n.Name.InlFormal(),
+		Name:          n.Sym().Name,
+		IsReturnValue: n.Class() == ir.PPARAMOUT,
+		IsInlFormal:   n.Name().InlFormal(),
 		Abbrev:        abbrev,
 		StackOffset:   int32(offs),
-		Type:          Ctxt.Lookup(typename),
+		Type:          base.Ctxt.Lookup(typename),
 		DeclFile:      declpos.RelFilename(),
 		DeclLine:      declpos.RelLine(),
 		DeclCol:       declpos.Col(),
@@ -550,19 +579,19 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
 
 // createComplexVars creates recomposed DWARF vars with location lists,
 // suitable for describing optimized code.
-func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
-	debugInfo := fn.DebugInfo
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) {
+	debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
 
 	// Produce a DWARF variable entry for each user variable.
-	var decls []*Node
+	var decls []ir.Node
 	var vars []*dwarf.Var
-	ssaVars := make(map[*Node]bool)
+	ssaVars := make(map[ir.Node]bool)
 
 	for varID, dvar := range debugInfo.Vars {
-		n := dvar.(*Node)
+		n := dvar
 		ssaVars[n] = true
 		for _, slot := range debugInfo.VarSlots[varID] {
-			ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
+			ssaVars[debugInfo.Slots[slot].N] = true
 		}
 
 		if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
@@ -576,12 +605,12 @@ func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*N
 
 // createDwarfVars processes fn, returning a list of DWARF variables and the
 // Nodes they represent.
-func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) {
+func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var) {
 	// Collect a raw list of DWARF vars.
 	var vars []*dwarf.Var
-	var decls []*Node
-	var selected map[*Node]bool
-	if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
+	var decls []ir.Node
+	var selected map[ir.Node]bool
+	if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
 		decls, vars, selected = createComplexVars(fnsym, fn)
 	} else {
 		decls, vars, selected = createSimpleVars(fnsym, apDecls)
@@ -608,11 +637,11 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
 		if _, found := selected[n]; found {
 			continue
 		}
-		c := n.Sym.Name[0]
-		if c == '.' || n.Type.IsUntyped() {
+		c := n.Sym().Name[0]
+		if c == '.' || n.Type().IsUntyped() {
 			continue
 		}
-		if n.Class() == PPARAM && !canSSAType(n.Type) {
+		if n.Class() == ir.PPARAM && !canSSAType(n.Type()) {
 			// SSA-able args get location lists, and may move in and
 			// out of registers, so those are handled elsewhere.
 			// Autos and named output params seem to get handled
@@ -624,13 +653,13 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
 			decls = append(decls, n)
 			continue
 		}
-		typename := dwarf.InfoPrefix + typesymname(n.Type)
+		typename := dwarf.InfoPrefix + typesymname(n.Type())
 		decls = append(decls, n)
 		abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
-		isReturnValue := (n.Class() == PPARAMOUT)
-		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+		isReturnValue := (n.Class() == ir.PPARAMOUT)
+		if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
 			abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
-		} else if n.Class() == PAUTOHEAP {
+		} else if n.Class() == ir.PAUTOHEAP {
 			// If dcl in question has been promoted to heap, do a bit
 			// of extra work to recover original class (auto or param);
 			// see issue 30908. This ensures that we get the proper
@@ -638,28 +667,28 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
 			// misleading location for the param (we want pointer-to-heap
 			// and not stack).
 			// TODO(thanm): generate a better location expression
-			stackcopy := n.Name.Param.Stackcopy
-			if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) {
+			stackcopy := n.Name().Param.Stackcopy
+			if stackcopy != nil && (stackcopy.Class() == ir.PPARAM || stackcopy.Class() == ir.PPARAMOUT) {
 				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
-				isReturnValue = (stackcopy.Class() == PPARAMOUT)
+				isReturnValue = (stackcopy.Class() == ir.PPARAMOUT)
 			}
 		}
 		inlIndex := 0
-		if genDwarfInline > 1 {
-			if n.Name.InlFormal() || n.Name.InlLocal() {
-				inlIndex = posInlIndex(n.Pos) + 1
-				if n.Name.InlFormal() {
+		if base.Flag.GenDwarfInl > 1 {
+			if n.Name().InlFormal() || n.Name().InlLocal() {
+				inlIndex = posInlIndex(n.Pos()) + 1
+				if n.Name().InlFormal() {
 					abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
 				}
 			}
 		}
-		declpos := Ctxt.InnermostPos(n.Pos)
+		declpos := base.Ctxt.InnermostPos(n.Pos())
 		vars = append(vars, &dwarf.Var{
-			Name:          n.Sym.Name,
+			Name:          n.Sym().Name,
 			IsReturnValue: isReturnValue,
 			Abbrev:        abbrev,
-			StackOffset:   int32(n.Xoffset),
-			Type:          Ctxt.Lookup(typename),
+			StackOffset:   int32(n.Offset()),
+			Type:          base.Ctxt.Lookup(typename),
 			DeclFile:      declpos.RelFilename(),
 			DeclLine:      declpos.RelLine(),
 			DeclCol:       declpos.Col(),
@@ -679,14 +708,14 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
 // function that is not local to the package being compiled, then the
 // names of the variables may have been "versioned" to avoid conflicts
 // with local vars; disregard this versioning when sorting.
-func preInliningDcls(fnsym *obj.LSym) []*Node {
-	fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
-	var rdcl []*Node
-	for _, n := range fn.Func.Inl.Dcl {
-		c := n.Sym.Name[0]
+func preInliningDcls(fnsym *obj.LSym) []ir.Node {
+	fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(ir.Node)
+	var rdcl []ir.Node
+	for _, n := range fn.Func().Inl.Dcl {
+		c := n.Sym().Name[0]
 		// Avoid reporting "_" parameters, since if there are more than
 		// one, it can result in a collision later on, as in #23179.
-		if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
+		if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
 			continue
 		}
 		rdcl = append(rdcl, n)
@@ -698,33 +727,33 @@ func preInliningDcls(fnsym *obj.LSym) []*Node {
 // stack pointer, suitable for use in a DWARF location entry. This has nothing
 // to do with its offset in the user variable.
 func stackOffset(slot ssa.LocalSlot) int32 {
-	n := slot.N.(*Node)
-	var base int64
+	n := slot.N
+	var off int64
 	switch n.Class() {
-	case PAUTO:
-		if Ctxt.FixedFrameSize() == 0 {
-			base -= int64(Widthptr)
+	case ir.PAUTO:
+		if base.Ctxt.FixedFrameSize() == 0 {
+			off -= int64(Widthptr)
 		}
 		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
 			// There is a word space for FP on ARM64 even if the frame pointer is disabled
-			base -= int64(Widthptr)
+			off -= int64(Widthptr)
 		}
-	case PPARAM, PPARAMOUT:
-		base += Ctxt.FixedFrameSize()
+	case ir.PPARAM, ir.PPARAMOUT:
+		off += base.Ctxt.FixedFrameSize()
 	}
-	return int32(base + n.Xoffset + slot.Off)
+	return int32(off + n.Offset() + slot.Off)
 }
 
 // createComplexVar builds a single DWARF variable entry and location list.
-func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
-	debug := fn.DebugInfo
-	n := debug.Vars[varID].(*Node)
+func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
+	debug := fn.DebugInfo.(*ssa.FuncDebug)
+	n := debug.Vars[varID]
 
 	var abbrev int
 	switch n.Class() {
-	case PAUTO:
+	case ir.PAUTO:
 		abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
-	case PPARAM, PPARAMOUT:
+	case ir.PPARAM, ir.PPARAMOUT:
 		abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
 	default:
 		return nil
@@ -734,21 +763,21 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
 	delete(fnsym.Func().Autot, gotype)
 	typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
 	inlIndex := 0
-	if genDwarfInline > 1 {
-		if n.Name.InlFormal() || n.Name.InlLocal() {
-			inlIndex = posInlIndex(n.Pos) + 1
-			if n.Name.InlFormal() {
+	if base.Flag.GenDwarfInl > 1 {
+		if n.Name().InlFormal() || n.Name().InlLocal() {
+			inlIndex = posInlIndex(n.Pos()) + 1
+			if n.Name().InlFormal() {
 				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
 			}
 		}
 	}
-	declpos := Ctxt.InnermostPos(n.Pos)
+	declpos := base.Ctxt.InnermostPos(n.Pos())
 	dvar := &dwarf.Var{
-		Name:          n.Sym.Name,
-		IsReturnValue: n.Class() == PPARAMOUT,
-		IsInlFormal:   n.Name.InlFormal(),
+		Name:          n.Sym().Name,
+		IsReturnValue: n.Class() == ir.PPARAMOUT,
+		IsInlFormal:   n.Name().InlFormal(),
 		Abbrev:        abbrev,
-		Type:          Ctxt.Lookup(typename),
+		Type:          base.Ctxt.Lookup(typename),
 		// The stack offset is used as a sorting key, so for decomposed
 		// variables just give it the first one. It's not used otherwise.
 		// This won't work well if the first slot hasn't been assigned a stack
@@ -763,7 +792,7 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
 	list := debug.LocationLists[varID]
 	if len(list) != 0 {
 		dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
-			debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
+			debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
 		}
 	}
 	return dvar
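
The bulk of this change is mechanical: direct field access on the old *Node struct (fn.Type, n.Xoffset, n.Sym) becomes method calls on the new ir.Node interface (fn.Type(), n.Offset()/n.SetOffset(...), n.Sym()). A toy sketch of that refactoring step, using hypothetical names rather than the real ir package:

package main

import "fmt"

// Before: callers reached straight into a concrete struct.
type oldNode struct {
	Xoffset int64
}

// After: callers go through an interface, so the field becomes a
// getter/setter pair that any node implementation can satisfy.
type node interface {
	Offset() int64
	SetOffset(int64)
}

type name struct{ offset int64 }

func (n *name) Offset() int64     { return n.offset }
func (n *name) SetOffset(x int64) { n.offset = x }

func main() {
	old := oldNode{Xoffset: -16}
	var n node = &name{}
	n.SetOffset(old.Xoffset) // n.Xoffset = v becomes n.SetOffset(v)
	fmt.Println(n.Offset())  // -16
}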
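cmpstackvarlt and byStackVar are unchanged in logic: AllocFrame still sorts fn.Dcl so that pointer-containing locals come first, then locals that need zeroing, then larger objects before smaller ones, with the symbol name as the final tiebreaker. A minimal, self-contained sketch of the same multi-key sort.Interface pattern (the stackVar type is hypothetical, and the used/class keys of the real comparison are omitted):

package main

import (
	"fmt"
	"sort"
)

// stackVar is a hypothetical stand-in for a local variable's layout keys.
type stackVar struct {
	name        string
	size        int64
	hasPointers bool
	needZero    bool
}

// byFrameOrder mirrors byStackVar: a slice type implementing
// sort.Interface with a multi-key comparison function.
type byFrameOrder []stackVar

func (s byFrameOrder) Len() int      { return len(s) }
func (s byFrameOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byFrameOrder) Less(i, j int) bool {
	a, b := s[i], s[j]
	if a.hasPointers != b.hasPointers {
		return a.hasPointers // pointer-containing locals first
	}
	if a.needZero != b.needZero {
		return a.needZero // then locals that must be zeroed
	}
	if a.size != b.size {
		return a.size > b.size // then larger objects before smaller
	}
	return a.name < b.name // name as the final tiebreaker
}

func main() {
	vars := []stackVar{
		{name: "x", size: 8},
		{name: "buf", size: 64, hasPointers: true, needZero: true},
		{name: "p", size: 8, hasPointers: true},
	}
	sort.Sort(byFrameOrder(vars))
	for _, v := range vars {
		fmt.Println(v.name) // buf, p, x
	}
}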
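AllocFrame then walks the sorted list, growing stksize by each variable's width, rounding up to the variable's alignment, and storing the offset negated (locals sit below the frame base, hence n.SetOffset(-s.stksize)). The rounding is plain round-up-to-a-power-of-two arithmetic; a sketch under that assumption (the compiler's Rnd helper additionally validates its arguments):

package main

import "fmt"

// rnd rounds o up to a multiple of r, assuming r is a power of two.
func rnd(o, r int64) int64 {
	return (o + r - 1) &^ (r - 1)
}

func main() {
	var stksize int64
	// width/align pairs for three hypothetical locals, already sorted.
	for _, v := range []struct{ width, align int64 }{{64, 8}, {8, 8}, {1, 1}} {
		stksize += v.width
		stksize = rnd(stksize, v.align)  // align the running frame size
		fmt.Println("offset:", -stksize) // each local gets a negative offset
	}
}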
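compileFunctions drains compilequeue with a fixed pool of backend workers; the worker count that used to live in the nBackendWorkers global is now read from the -c flag as base.Flag.LowerC. The concurrency shape is a standard channel-fed worker pool, sketched here with strings standing in for the queued ir.Node functions:

package main

import (
	"fmt"
	"sync"
)

func main() {
	queue := []string{"f", "g", "h", "i"}
	const workers = 2 // stands in for base.Flag.LowerC

	var wg sync.WaitGroup
	c := make(chan string, workers)
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(worker int) {
			// Each worker drains the channel until it is closed.
			for fn := range c {
				fmt.Printf("worker %d compiling %s\n", worker, fn)
			}
			wg.Done()
		}(i)
	}
	for _, fn := range queue {
		c <- fn
	}
	close(c)
	wg.Wait()
}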