author    Heschi Kreinick <heschi@google.com>  2018-06-20 16:45:25 -0400
committer Heschi Kreinick <heschi@google.com>  2018-06-22 19:16:14 +0000
commit    726a2d04ea457430d0ecfd8ee1e9ffebcb78e2f6
tree      d1210b1592c9886560870f531585666e6ec014af
parent    3dced519cbabc213df369d9112206986e62687fa
cmd/link: support DWARF compression on Darwin
We want to compress DWARF even on macOS, but the native toolchain isn't going to understand it. Add a flag that can be used to disable compression, then add Darwin to the whitelist used during internal linking.

Unlike GNU ld, the Darwin linker doesn't have a handy linker flag to do compression. But since we're already doing surgery to put the DWARF in the output executable in the first place, compressing it at the same time isn't unduly difficult.

This does have the slightly odd effect of compressing some Apple proprietary debug sections, which absolutely nothing will understand. Leaving them uncompressed didn't make much sense, though, since I doubt they're useful without (say) __debug_info.

Updates #11799

Change-Id: Ie00b0215c630a798c59d009a641e2d13f0e7ea01
Reviewed-on: https://go-review.googlesource.com/120155
Run-TryBot: Heschi Kreinick <heschi@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
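As the message notes, no Apple tooling understands the compressed output, so a consumer has to undo the framing itself. The standalone Go sketch below is illustrative only and not part of the patch (the hard-coded __zdebug_info section name and the command-line path are assumptions); it reads back one section using the format produced by machoCompressSection in the diff: the section is renamed to "__z" plus the original name without its "__" prefix, and its payload is the literal "ZLIB", an 8-byte big-endian uncompressed size, then a zlib stream.

package main

import (
	"bytes"
	"compress/zlib"
	"debug/macho"
	"encoding/binary"
	"fmt"
	"io"
	"log"
	"os"
)

// Illustrative reader for a DWARF section compressed by this change.
// The section name and argument handling are assumptions for the sketch.
func main() {
	if len(os.Args) != 2 {
		log.Fatal("usage: readzdwarf <binary>")
	}
	f, err := macho.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// __debug_info is renamed __zdebug_info when compressed.
	sect := f.Section("__zdebug_info")
	if sect == nil {
		log.Fatal("no compressed __debug_info section found")
	}
	data, err := sect.Data()
	if err != nil {
		log.Fatal(err)
	}
	// Payload layout: "ZLIB" magic, 8-byte big-endian uncompressed size, zlib stream.
	if len(data) < 12 || string(data[:4]) != "ZLIB" {
		log.Fatal("unexpected section header")
	}
	size := binary.BigEndian.Uint64(data[4:12])

	zr, err := zlib.NewReader(bytes.NewReader(data[12:]))
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()
	raw, err := io.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("__debug_info: %d compressed bytes -> %d bytes (header said %d)\n",
		len(data), len(raw), size)
}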
-rw-r--r--  src/cmd/link/internal/ld/dwarf.go               |   3
-rw-r--r--  src/cmd/link/internal/ld/lib.go                 |   4
-rw-r--r--  src/cmd/link/internal/ld/link.go                |   7
-rw-r--r--  src/cmd/link/internal/ld/macho_combine_dwarf.go | 153
-rw-r--r--  src/cmd/link/internal/ld/main.go                |   1
5 files changed, 142 insertions(+), 26 deletions(-)
diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go
index 1dd45969c7..830d81d446 100644
--- a/src/cmd/link/internal/ld/dwarf.go
+++ b/src/cmd/link/internal/ld/dwarf.go
@@ -1951,7 +1951,8 @@ func dwarfaddelfsectionsyms(ctxt *Link) {
// relocations are applied. After this, dwarfp will contain a
// different (new) set of symbols, and sections may have been replaced.
func dwarfcompress(ctxt *Link) {
- if !(ctxt.IsELF || ctxt.HeadType == objabi.Hwindows) || ctxt.LinkMode == LinkExternal {
+ supported := ctxt.IsELF || ctxt.HeadType == objabi.Hwindows || ctxt.HeadType == objabi.Hdarwin
+ if !ctxt.compressDWARF || !supported || ctxt.LinkMode != LinkInternal {
return
}
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index fca6bdc1e2..0fe0b42014 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -1222,7 +1222,7 @@ func (ctxt *Link) hostlink() {
}
const compressDWARF = "-Wl,--compress-debug-sections=zlib-gnu"
- if linkerFlagSupported(argv[0], compressDWARF) {
+ if ctxt.compressDWARF && linkerFlagSupported(argv[0], compressDWARF) {
argv = append(argv, compressDWARF)
}
@@ -1336,7 +1336,7 @@ func (ctxt *Link) hostlink() {
}
// For os.Rename to work reliably, must be in same directory as outfile.
combinedOutput := *flagOutfile + "~"
- isIOS, err := machoCombineDwarf(*flagOutfile, dsym, combinedOutput, ctxt.BuildMode)
+ isIOS, err := machoCombineDwarf(ctxt, *flagOutfile, dsym, combinedOutput)
if err != nil {
Exitf("%s: combining dwarf failed: %v", os.Args[0], err)
}
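For external linking, the hunk above only passes -Wl,--compress-debug-sections=zlib-gnu when the user hasn't opted out and linkerFlagSupported reports that the host toolchain accepts it. As a rough illustration of that kind of probe, the hypothetical standalone sketch below (not cmd/link's actual linkerFlagSupported; the helper name and probe source are assumptions) asks the C driver to link a trivial program with the candidate flag and treats a successful exit as "supported":

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// probeLinkerFlag is a hypothetical sketch of the general technique:
// link a trivial program with the candidate flag and report whether the
// driver succeeded. It is not cmd/link's actual linkerFlagSupported.
func probeLinkerFlag(cc, flag string) bool {
	dir, err := os.MkdirTemp("", "flagprobe")
	if err != nil {
		return false
	}
	defer os.RemoveAll(dir)

	src := filepath.Join(dir, "probe.c")
	if err := os.WriteFile(src, []byte("int main(void) { return 0; }\n"), 0o644); err != nil {
		return false
	}
	// A failing exit status means the driver (or its linker) rejected the flag.
	cmd := exec.Command(cc, flag, "-o", filepath.Join(dir, "probe.out"), src)
	return cmd.Run() == nil
}

func main() {
	const candidate = "-Wl,--compress-debug-sections=zlib-gnu"
	fmt.Println(candidate, "supported:", probeLinkerFlag("cc", candidate))
}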
diff --git a/src/cmd/link/internal/ld/link.go b/src/cmd/link/internal/ld/link.go
index a790fd084b..2e66cf857c 100644
--- a/src/cmd/link/internal/ld/link.go
+++ b/src/cmd/link/internal/ld/link.go
@@ -63,9 +63,10 @@ type Link struct {
IsELF bool
HeadType objabi.HeadType
- linkShared bool // link against installed Go shared libraries
- LinkMode LinkMode
- BuildMode BuildMode
+ linkShared bool // link against installed Go shared libraries
+ LinkMode LinkMode
+ BuildMode BuildMode
+ compressDWARF bool
Tlsg *sym.Symbol
Libdir []string
diff --git a/src/cmd/link/internal/ld/macho_combine_dwarf.go b/src/cmd/link/internal/ld/macho_combine_dwarf.go
index 06aa3c6f4c..95bd4c7c36 100644
--- a/src/cmd/link/internal/ld/macho_combine_dwarf.go
+++ b/src/cmd/link/internal/ld/macho_combine_dwarf.go
@@ -6,6 +6,7 @@ package ld
import (
"bytes"
+ "compress/zlib"
"debug/macho"
"encoding/binary"
"fmt"
@@ -93,7 +94,7 @@ func (r loadCmdReader) WriteAt(offset int64, data interface{}) error {
// header to add the DWARF sections. (Use ld's -headerpad option)
// dsym is the path to the macho file containing DWARF from dsymutil.
// outexe is the path where the combined executable should be saved.
-func machoCombineDwarf(inexe, dsym, outexe string, buildmode BuildMode) (bool, error) {
+func machoCombineDwarf(ctxt *Link, inexe, dsym, outexe string) (bool, error) {
exef, err := os.Open(inexe)
if err != nil {
return false, err
@@ -156,6 +157,13 @@ func machoCombineDwarf(inexe, dsym, outexe string, buildmode BuildMode) (bool, e
return false, fmt.Errorf("missing __DWARF segment")
}
+ // Try to compress the DWARF sections. This includes some Apple
+ // proprietary sections like __apple_types.
+ compressedSects, compressedBytes, err := machoCompressSections(ctxt, dwarfm)
+ if err != nil {
+ return false, err
+ }
+
// Now copy the dwarf data into the output.
// Kernel requires all loaded segments to be page-aligned in the file,
// even though we mark this one as being 0 bytes of virtual address space.
@@ -168,15 +176,27 @@ func machoCombineDwarf(inexe, dsym, outexe string, buildmode BuildMode) (bool, e
if _, err = dwarff.Seek(int64(realdwarf.Offset), 0); err != nil {
return false, err
}
- if _, err := io.CopyN(outf, dwarff, int64(realdwarf.Filesz)); err != nil {
- return false, err
+
+ // Write out the compressed sections, or the originals if we gave up
+ // on compressing them.
+ var dwarfsize uint64
+ if compressedBytes != nil {
+ dwarfsize = uint64(len(compressedBytes))
+ if _, err := outf.Write(compressedBytes); err != nil {
+ return false, err
+ }
+ } else {
+ if _, err := io.CopyN(outf, dwarff, int64(realdwarf.Filesz)); err != nil {
+ return false, err
+ }
+ dwarfsize = realdwarf.Filesz
}
// And finally the linkedit section.
if _, err = exef.Seek(int64(linkseg.Offset), 0); err != nil {
return false, err
}
- linkstart = machoCalcStart(linkseg.Offset, uint64(dwarfstart)+realdwarf.Filesz, pageAlign)
+ linkstart = machoCalcStart(linkseg.Offset, uint64(dwarfstart)+dwarfsize, pageAlign)
linkoffset = uint32(linkstart - int64(linkseg.Offset))
if _, err = outf.Seek(linkstart, 0); err != nil {
return false, err
@@ -196,14 +216,14 @@ func machoCombineDwarf(inexe, dsym, outexe string, buildmode BuildMode) (bool, e
if availablePadding < int64(realdwarf.Len) {
return false, fmt.Errorf("No room to add dwarf info. Need at least %d padding bytes, found %d", realdwarf.Len, availablePadding)
}
- // First, copy the dwarf load command into the header
+ // First, copy the dwarf load command into the header. It will be
+ // updated later with new offsets and lengths as necessary.
if _, err = outf.Seek(dwarfCmdOffset, 0); err != nil {
return false, err
}
if _, err := io.CopyN(outf, bytes.NewReader(realdwarf.Raw()), int64(realdwarf.Len)); err != nil {
return false, err
}
-
if _, err = outf.Seek(int64(unsafe.Offsetof(exem.FileHeader.Ncmd)), 0); err != nil {
return false, err
}
@@ -244,7 +264,76 @@ func machoCombineDwarf(inexe, dsym, outexe string, buildmode BuildMode) (bool, e
return false, err
}
}
- return false, machoUpdateDwarfHeader(&reader, buildmode)
+ // Do the final update of the DWARF segment's load command.
+ return false, machoUpdateDwarfHeader(&reader, ctxt.BuildMode, compressedSects)
+}
+
+// machoCompressSections tries to compress the DWARF segments in dwarfm,
+// returning the updated sections and segment contents, nils if the sections
+// weren't compressed, or an error if there was a problem reading dwarfm.
+func machoCompressSections(ctxt *Link, dwarfm *macho.File) ([]*macho.Section, []byte, error) {
+ if !ctxt.compressDWARF {
+ return nil, nil, nil
+ }
+
+ dwarfseg := dwarfm.Segment("__DWARF")
+ var sects []*macho.Section
+ var bytes []byte
+
+ for _, sect := range dwarfm.Sections {
+ if sect.Seg != "__DWARF" {
+ continue
+ }
+
+ // As of writing, there are no relocations in dsymutil's output
+ // so there's no point in worrying about them. Bail out if that
+ // changes.
+ if sect.Nreloc != 0 {
+ return nil, nil, nil
+ }
+
+ data, err := sect.Data()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ compressed, contents, err := machoCompressSection(data)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ newSec := *sect
+ newSec.Offset = uint32(dwarfseg.Offset) + uint32(len(bytes))
+ newSec.Addr = dwarfseg.Addr + uint64(len(bytes))
+ if compressed {
+ newSec.Name = "__z" + sect.Name[2:]
+ newSec.Size = uint64(len(contents))
+ }
+ sects = append(sects, &newSec)
+ bytes = append(bytes, contents...)
+ }
+ return sects, bytes, nil
+}
+
+// machoCompressSection compresses sectBytes if it results in less data.
+func machoCompressSection(sectBytes []byte) (compressed bool, contents []byte, err error) {
+ var buf bytes.Buffer
+ buf.Write([]byte("ZLIB"))
+ var sizeBytes [8]byte
+ binary.BigEndian.PutUint64(sizeBytes[:], uint64(len(sectBytes)))
+ buf.Write(sizeBytes[:])
+
+ z := zlib.NewWriter(&buf)
+ if _, err := z.Write(sectBytes); err != nil {
+ return false, nil, err
+ }
+ if err := z.Close(); err != nil {
+ return false, nil, err
+ }
+ if len(buf.Bytes()) >= len(sectBytes) {
+ return false, sectBytes, nil
+ }
+ return true, buf.Bytes(), nil
}
// machoUpdateSegment updates the load command for a moved segment.
@@ -267,10 +356,10 @@ func machoUpdateSegment(r loadCmdReader, seg, sect interface{}) error {
return err
}
// There shouldn't be any sections, but just to make sure...
- return machoUpdateSections(r, segValue, reflect.ValueOf(sect), uint64(linkoffset), 0)
+ return machoUpdateSections(r, segValue, reflect.ValueOf(sect), uint64(linkoffset), 0, nil)
}
-func machoUpdateSections(r loadCmdReader, seg, sect reflect.Value, deltaOffset, deltaAddr uint64) error {
+func machoUpdateSections(r loadCmdReader, seg, sect reflect.Value, deltaOffset, deltaAddr uint64, compressedSects []*macho.Section) error {
iseg := reflect.Indirect(seg)
nsect := iseg.FieldByName("Nsect").Uint()
if nsect == 0 {
@@ -282,19 +371,35 @@ func machoUpdateSections(r loadCmdReader, seg, sect reflect.Value, deltaOffset,
offsetField := isect.FieldByName("Offset")
reloffField := isect.FieldByName("Reloff")
addrField := isect.FieldByName("Addr")
+ nameField := isect.FieldByName("Name")
+ sizeField := isect.FieldByName("Size")
sectSize := int64(isect.Type().Size())
for i := uint64(0); i < nsect; i++ {
if err := r.ReadAt(sectOffset, sect.Interface()); err != nil {
return err
}
- if offsetField.Uint() != 0 {
- offsetField.SetUint(offsetField.Uint() + deltaOffset)
- }
- if reloffField.Uint() != 0 {
- reloffField.SetUint(reloffField.Uint() + deltaOffset)
- }
- if addrField.Uint() != 0 {
- addrField.SetUint(addrField.Uint() + deltaAddr)
+ if compressedSects != nil {
+ cSect := compressedSects[i]
+ var name [16]byte
+ copy(name[:], []byte(cSect.Name))
+ nameField.Set(reflect.ValueOf(name))
+ sizeField.SetUint(cSect.Size)
+ if cSect.Offset != 0 {
+ offsetField.SetUint(uint64(cSect.Offset) + deltaOffset)
+ }
+ if cSect.Addr != 0 {
+ addrField.SetUint(cSect.Addr + deltaAddr)
+ }
+ } else {
+ if offsetField.Uint() != 0 {
+ offsetField.SetUint(offsetField.Uint() + deltaOffset)
+ }
+ if reloffField.Uint() != 0 {
+ reloffField.SetUint(reloffField.Uint() + deltaOffset)
+ }
+ if addrField.Uint() != 0 {
+ addrField.SetUint(addrField.Uint() + deltaAddr)
+ }
}
if err := r.WriteAt(sectOffset, sect.Interface()); err != nil {
return err
@@ -305,7 +410,7 @@ func machoUpdateSections(r loadCmdReader, seg, sect reflect.Value, deltaOffset,
}
// machoUpdateDwarfHeader updates the DWARF segment load command.
-func machoUpdateDwarfHeader(r *loadCmdReader, buildmode BuildMode) error {
+func machoUpdateDwarfHeader(r *loadCmdReader, buildmode BuildMode, compressedSects []*macho.Section) error {
var seg, sect interface{}
cmd, err := r.Next()
if err != nil {
@@ -322,10 +427,18 @@ func machoUpdateDwarfHeader(r *loadCmdReader, buildmode BuildMode) error {
return err
}
segv := reflect.ValueOf(seg).Elem()
-
segv.FieldByName("Offset").SetUint(uint64(dwarfstart))
segv.FieldByName("Addr").SetUint(uint64(dwarfaddr))
+ if compressedSects != nil {
+ var segSize uint64
+ for _, newSect := range compressedSects {
+ segSize += newSect.Size
+ }
+ segv.FieldByName("Filesz").SetUint(segSize)
+ segv.FieldByName("Memsz").SetUint(uint64(Rnd(int64(segSize), 1<<pageAlign)))
+ }
+
deltaOffset := uint64(dwarfstart) - realdwarf.Offset
deltaAddr := uint64(dwarfaddr) - realdwarf.Addr
@@ -344,7 +457,7 @@ func machoUpdateDwarfHeader(r *loadCmdReader, buildmode BuildMode) error {
if err := r.WriteAt(0, seg); err != nil {
return err
}
- return machoUpdateSections(*r, segv, reflect.ValueOf(sect), deltaOffset, deltaAddr)
+ return machoUpdateSections(*r, segv, reflect.ValueOf(sect), deltaOffset, deltaAddr, compressedSects)
}
func machoUpdateLoadCommand(r loadCmdReader, cmd interface{}, fields ...string) error {
diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go
index e012383e69..d7929d59fd 100644
--- a/src/cmd/link/internal/ld/main.go
+++ b/src/cmd/link/internal/ld/main.go
@@ -122,6 +122,7 @@ func Main(arch *sys.Arch, theArch Arch) {
flag.BoolVar(&ctxt.linkShared, "linkshared", false, "link against installed Go shared libraries")
flag.Var(&ctxt.LinkMode, "linkmode", "set link `mode`")
flag.Var(&ctxt.BuildMode, "buildmode", "set build `mode`")
+ flag.BoolVar(&ctxt.compressDWARF, "compressdwarf", true, "compress DWARF if possible")
objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF", addbuildinfo)
objabi.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) })
objabi.AddVersionFlag() // -V
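The new -compressdwarf flag defaults to true, so compression stays enabled unless it is explicitly turned off. From the go command it would be reached through the linker's flags, for example go build -ldflags=-compressdwarf=false; that invocation is shown here only to illustrate how the flag is plumbed through and is not part of the patch itself.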