Diffstat (limited to 'src/cmd/compile/internal/noder/import.go')
-rw-r--r--  src/cmd/compile/internal/noder/import.go  |  302
1 file changed, 197 insertions(+), 105 deletions(-)
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
index 701e9001c8..48f0e48028 100644
--- a/src/cmd/compile/internal/noder/import.go
+++ b/src/cmd/compile/internal/noder/import.go
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"internal/buildcfg"
- "io"
"os"
pathpkg "path"
"runtime"
@@ -32,8 +31,24 @@ import (
"cmd/internal/src"
)
-// Temporary import helper to get type2-based type-checking going.
+// haveLegacyImports records whether we've imported any packages
+// without a new export data section. This is useful for experimenting
+// with new export data format designs, when you need to support
+// existing tests that manually compile files with inconsistent
+// compiler flags.
+var haveLegacyImports = false
+
+// newReadImportFunc is an extension hook for experimenting with new
+// export data formats. If a new export data payload was written out
+// for an imported package by overloading writeNewExportFunc, then
+// that payload will be mapped into memory and passed to
+// newReadImportFunc.
+var newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
+ panic("unexpected new export data payload")
+}
+
type gcimports struct {
+ check *types2.Checker
packages map[string]*types2.Package
}
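The hook's intended use is only implied here: an experimental build overwrites newReadImportFunc (and writeNewExportFunc on the writing side) before imports are processed. A hypothetical sketch, as it might appear in an experiment-only file in package noder; readUnifiedPkg is an invented placeholder, not part of this change:

// Hypothetical: install an experimental decoder for the new payload.
func init() {
	newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (*types2.Package, error) {
		// readUnifiedPkg is a placeholder for the experimental decoder.
		return readUnifiedPkg(data, pkg1, check, packages)
	}
}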
@@ -46,13 +61,8 @@ func (m *gcimports) ImportFrom(path, srcDir string, mode types2.ImportMode) (*ty
panic("mode must be 0")
}
- path, err := resolveImportPath(path)
- if err != nil {
- return nil, err
- }
-
- lookup := func(path string) (io.ReadCloser, error) { return openPackage(path) }
- return importer.Import(m.packages, path, srcDir, lookup)
+ _, pkg, err := readImportFile(path, typecheck.Target, m.check, m.packages)
+ return pkg, err
}
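gcimports.ImportFrom now delegates to readImportFile while still satisfying the types2 ImporterFrom contract, which mirrors the exported go/types one. A minimal, runnable sketch of that same contract against the public API (go/importer stands in for the internal machinery):

package main

import (
	"fmt"
	"go/importer"
	"go/types"
)

func main() {
	imp := importer.Default()
	from, ok := imp.(types.ImporterFrom)
	if !ok {
		fmt.Println("importer does not support ImportFrom")
		return
	}
	// mode must be 0, matching the panic guard in gcimports.ImportFrom.
	pkg, err := from.ImportFrom("fmt", ".", 0)
	if err != nil {
		fmt.Println("import failed:", err)
		return
	}
	fmt.Println(pkg.Path(), pkg.Complete())
}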
func isDriveLetter(b byte) bool {
@@ -175,160 +185,242 @@ func resolveImportPath(path string) (string, error) {
return path, nil
}
-// TODO(mdempsky): Return an error instead.
func importfile(decl *syntax.ImportDecl) *types.Pkg {
- if decl.Path.Kind != syntax.StringLit {
- base.Errorf("import path must be a string")
+ path, err := parseImportPath(decl.Path)
+ if err != nil {
+ base.Errorf("%s", err)
return nil
}
- path, err := strconv.Unquote(decl.Path.Value)
+ pkg, _, err := readImportFile(path, typecheck.Target, nil, nil)
if err != nil {
- base.Errorf("import path must be a string")
+ base.Errorf("%s", err)
return nil
}
+ if pkg != ir.Pkgs.Unsafe && pkg.Height >= myheight {
+ myheight = pkg.Height + 1
+ }
+ return pkg
+}
+
+func parseImportPath(pathLit *syntax.BasicLit) (string, error) {
+ if pathLit.Kind != syntax.StringLit {
+ return "", errors.New("import path must be a string")
+ }
+
+ path, err := strconv.Unquote(pathLit.Value)
+ if err != nil {
+ return "", errors.New("import path must be a string")
+ }
+
if err := checkImportPath(path, false); err != nil {
- base.Errorf("%s", err.Error())
- return nil
+ return "", err
}
+ return path, err
+}
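parseImportPath reduces to strconv.Unquote plus validation; a standalone sketch of the unquoting step (parsePathLiteral is illustrative, not part of the CL):

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// parsePathLiteral mimics parseImportPath's handling of the raw
// source literal, e.g. `"math/rand"` including its quotes.
func parsePathLiteral(lit string) (string, error) {
	path, err := strconv.Unquote(lit)
	if err != nil {
		return "", errors.New("import path must be a string")
	}
	return path, nil
}

func main() {
	fmt.Println(parsePathLiteral(`"math/rand"`)) // math/rand <nil>
	fmt.Println(parsePathLiteral(`math/rand`))   // "" import path must be a string
}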
+
+// readImportFile reads the import file for the given package path and
+// returns its types.Pkg representation. If packages is non-nil, the
+// types2.Package representation is also returned.
+func readImportFile(path string, target *ir.Package, check *types2.Checker, packages map[string]*types2.Package) (pkg1 *types.Pkg, pkg2 *types2.Package, err error) {
path, err = resolveImportPath(path)
if err != nil {
- base.Errorf("%s", err)
- return nil
+ return
+ }
+
+ if path == "unsafe" {
+ pkg1, pkg2 = ir.Pkgs.Unsafe, types2.Unsafe
+
+ // TODO(mdempsky): Investigate if this actually matters. Why would
+ // the linker or runtime care whether a package imported unsafe?
+ if !pkg1.Direct {
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
+ }
+
+ return
}
- importpkg := types.NewPkg(path, "")
- if importpkg.Direct {
- return importpkg // already fully loaded
+ pkg1 = types.NewPkg(path, "")
+ if packages != nil {
+ pkg2 = packages[path]
+ assert(pkg1.Direct == (pkg2 != nil && pkg2.Complete()))
}
- importpkg.Direct = true
- typecheck.Target.Imports = append(typecheck.Target.Imports, importpkg)
- if path == "unsafe" {
- return importpkg // initialized with universe
+ if pkg1.Direct {
+ return
}
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
f, err := openPackage(path)
if err != nil {
- base.Errorf("could not import %q: %v", path, err)
- base.ErrorExit()
+ return
}
- imp := bio.NewReader(f)
- defer imp.Close()
- file := f.Name()
+ defer f.Close()
- // check object header
- p, err := imp.ReadString('\n')
+ r, end, newsize, err := findExportData(f)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- if p == "!<arch>\n" { // package archive
- // package export block should be first
- sz := archive.ReadHeader(imp.Reader, "__.PKGDEF")
- if sz <= 0 {
- base.Errorf("import %s: not a package file", file)
- base.ErrorExit()
- }
- p, err = imp.ReadString('\n')
+ if base.Debug.Export != 0 {
+ fmt.Printf("importing %s (%s)\n", path, f.Name())
+ }
+
+ if newsize != 0 {
+ // We have unified IR data. Map it, and feed to the importers.
+ end -= newsize
+ var data string
+ data, err = base.MapFile(r.File(), end, newsize)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- }
- if !strings.HasPrefix(p, "go object ") {
- base.Errorf("import %s: not a go object file: %s", file, p)
- base.ErrorExit()
- }
- q := objabi.HeaderString()
- if p != q {
- base.Errorf("import %s: object is [%s] expected [%s]", file, p, q)
- base.ErrorExit()
- }
+ pkg2, err = newReadImportFunc(data, pkg1, check, packages)
+ } else {
+ // We only have old data. Oh well, fall back to the legacy importers.
+ haveLegacyImports = true
- // process header lines
- for {
- p, err = imp.ReadString('\n')
+ var c byte
+ switch c, err = r.ReadByte(); {
+ case err != nil:
+ return
+
+ case c != 'i':
+ // Indexed format is distinguished by an 'i' byte,
+ // whereas previous export formats started with 'c', 'd', or 'v'.
+ err = fmt.Errorf("unexpected package format byte: %v", c)
+ return
+ }
+
+ pos := r.Offset()
+
+ // Map string (and data) section into memory as a single large
+ // string. This reduces heap fragmentation and allows
+ // returning individual substrings very efficiently.
+ var data string
+ data, err = base.MapFile(r.File(), pos, end-pos)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- if p == "\n" {
- break // header ends with blank line
+
+ typecheck.ReadImports(pkg1, data)
+
+ if packages != nil {
+ pkg2, err = importer.ImportData(packages, data, path)
+ if err != nil {
+ return
+ }
}
}
- // Expect $$B\n to signal binary import format.
+ err = addFingerprint(path, f, end)
+ return
+}
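The heap-fragmentation comment above relies on Go string semantics: slicing a string copies only the two-word header, never the bytes, so handing out substrings of one large mapped string is allocation-free. A self-contained illustration (the actual mmap performed by base.MapFile is elided):

package main

import (
	"fmt"
	"strings"
	"testing"
)

func main() {
	data := strings.Repeat("x", 1<<20) // stand-in for the mapped section

	var s string
	allocs := testing.AllocsPerRun(100, func() {
		s = data[3:512] // substring: new header, shared backing bytes
	})
	fmt.Println(len(s), allocs) // 509 0
}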
+
+// findExportData returns a *bio.Reader positioned at the start of the
+// binary export data section, a file offset for where to stop reading,
+// and the size of the trailing new export data section, if any.
+func findExportData(f *os.File) (r *bio.Reader, end, newsize int64, err error) {
+ r = bio.NewReader(f)
+
+ // check object header
+ line, err := r.ReadString('\n')
+ if err != nil {
+ return
+ }
- // look for $$
- var c byte
- for {
- c, err = imp.ReadByte()
+ if line == "!<arch>\n" { // package archive
+ // package export block should be first
+ sz := int64(archive.ReadHeader(r.Reader, "__.PKGDEF"))
+ if sz <= 0 {
+ err = errors.New("not a package file")
+ return
+ }
+ end = r.Offset() + sz
+ line, err = r.ReadString('\n')
if err != nil {
- break
+ return
}
- if c == '$' {
- c, err = imp.ReadByte()
- if c == '$' || err != nil {
- break
- }
+ } else {
+ // Not an archive; provide end of file instead.
+ // TODO(mdempsky): I don't think this happens anymore.
+ var fi os.FileInfo
+ fi, err = f.Stat()
+ if err != nil {
+ return
}
+ end = fi.Size()
}
- // get character after $$
- if err == nil {
- c, _ = imp.ReadByte()
+ if !strings.HasPrefix(line, "go object ") {
+ err = fmt.Errorf("not a go object file: %s", line)
+ return
+ }
+ if expect := objabi.HeaderString(); line != expect {
+ err = fmt.Errorf("object is [%s] expected [%s]", line, expect)
+ return
}
- var fingerprint goobj.FingerprintType
- switch c {
- case '\n':
- base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path)
- return nil
-
- case 'B':
- if base.Debug.Export != 0 {
- fmt.Printf("importing %s (%s)\n", path, file)
+ // process header lines
+ for !strings.HasPrefix(line, "$$") {
+ if strings.HasPrefix(line, "newexportsize ") {
+ fields := strings.Fields(line)
+ newsize, err = strconv.ParseInt(fields[1], 10, 64)
+ if err != nil {
+ return
+ }
}
- imp.ReadByte() // skip \n after $$B
- c, err = imp.ReadByte()
+ line, err = r.ReadString('\n')
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
+ }
- // Indexed format is distinguished by an 'i' byte,
- // whereas previous export formats started with 'c', 'd', or 'v'.
- if c != 'i' {
- base.Errorf("import %s: unexpected package format byte: %v", file, c)
- base.ErrorExit()
- }
- fingerprint = typecheck.ReadImports(importpkg, imp)
+ // Expect $$B\n to signal binary import format.
+ if line != "$$B\n" {
+ err = errors.New("old export format no longer supported (recompile library)")
+ return
+ }
+
+ return
+}
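The header scan in findExportData is easy to exercise in isolation; a runnable sketch over a synthetic header (newexportsize is the experimental field introduced by this change, not a stable part of the object file format):

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// scanHeader reads header lines until the "$$" delimiter, picking up
// the newexportsize value if the field is present.
func scanHeader(r *bufio.Reader) (int64, error) {
	var newsize int64
	for {
		line, err := r.ReadString('\n')
		if err != nil {
			return 0, err
		}
		if strings.HasPrefix(line, "$$") {
			return newsize, nil
		}
		if strings.HasPrefix(line, "newexportsize ") {
			newsize, err = strconv.ParseInt(strings.Fields(line)[1], 10, 64)
			if err != nil {
				return 0, err
			}
		}
	}
}

func main() {
	hdr := "go object linux amd64 devel\nnewexportsize 1234\n$$B\n"
	n, err := scanHeader(bufio.NewReader(strings.NewReader(hdr)))
	fmt.Println(n, err) // 1234 <nil>
}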
+
+// addFingerprint reads the linker fingerprint included at the end of
+// the export data.
+func addFingerprint(path string, f *os.File, end int64) error {
+ const eom = "\n$$\n"
+ var fingerprint goobj.FingerprintType
+
+ var buf [len(fingerprint) + len(eom)]byte
+ if _, err := f.ReadAt(buf[:], end-int64(len(buf))); err != nil {
+ return err
+ }
- default:
- base.Errorf("no import in %q", path)
- base.ErrorExit()
+ // Caller should have given us the end position of the export data,
+ // which should end with the "\n$$\n" marker. As a consistency check
+ // to make sure we're reading at the right offset, make sure we
+ // found the marker.
+ if s := string(buf[len(fingerprint):]); s != eom {
+ return fmt.Errorf("expected $$ marker, but found %q", s)
}
+ copy(fingerprint[:], buf[:])
+
// assume files move (get installed) so don't record the full path
if base.Flag.Cfg.PackageFile != nil {
// If using a packageFile map, assume path_ can be recorded directly.
base.Ctxt.AddImport(path, fingerprint)
} else {
// For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
+ file := f.Name()
base.Ctxt.AddImport(file[len(file)-len(path)-len(".a"):], fingerprint)
}
-
- if importpkg.Height >= myheight {
- myheight = importpkg.Height + 1
- }
-
- return importpkg
+ return nil
}
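addFingerprint is the usual fixed-size-trailer pattern: read the last len(fingerprint)+len(eom) bytes at a known offset, verify the marker, keep the rest. A self-contained sketch over an in-memory slice (assuming the 8-byte size of goobj.FingerprintType):

package main

import (
	"bytes"
	"fmt"
)

const eom = "\n$$\n"

// readTrailer extracts an 8-byte fingerprint sitting immediately
// before the "\n$$\n" end-of-export marker at offset end.
func readTrailer(data []byte, end int64) ([8]byte, error) {
	var fp [8]byte
	var buf [8 + len(eom)]byte
	copy(buf[:], data[end-int64(len(buf)):end])
	if s := string(buf[len(fp):]); s != eom {
		return fp, fmt.Errorf("expected $$ marker, but found %q", s)
	}
	copy(fp[:], buf[:])
	return fp, nil
}

func main() {
	payload := append(bytes.Repeat([]byte{0xAB}, 8), eom...)
	data := append([]byte("...export data..."), payload...)
	fp, err := readTrailer(data, int64(len(data)))
	fmt.Printf("%x %v\n", fp, err) // abababababababab <nil>
}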
// The linker uses the magic symbol prefixes "go." and "type."
@@ -431,7 +523,7 @@ func clearImports() {
s.Def = nil
continue
}
- if types.IsDotAlias(s) {
+ if s.Def != nil && s.Def.Sym() != s {
// throw away top-level name left over
// from previous import . "x"
// We'll report errors after type checking in CheckDotImports.