path: root/src/cmd/compile/internal/noder/import.go
author	Matthew Dempsky <mdempsky@google.com>	2021-06-11 06:33:30 -0700
committer	Matthew Dempsky <mdempsky@google.com>	2021-06-12 00:11:57 +0000
commit	2954f11eadf344786d0ec6e3e1d34f6a5c385246 (patch)
tree	43b2652c4cc238571feed048e146e4c5ad21cbd2 /src/cmd/compile/internal/noder/import.go
parent	c93d5d1a5245d4baa6824a2c88a6b79e3d895e4d (diff)
[dev.typeparams] cmd/compile: scaffolding for export data experiments
This CL adds a simple framework for augmenting the current export
data format by writing out additional data *after* the existing data,
with an extra header before it that current readers ignore.

In particular, this is used by unified IR to be able to experiment
and iterate on export data designs without having to keep the
go/internal/gcimporter and x/tools/go/gcexportdata importers in sync.
Instead, they simply continue reading the existing data written out
by typecheck/iexport.go.

Change-Id: I883211c2892e2c7dec758b85ff6bc31b244440a0
Reviewed-on: https://go-review.googlesource.com/c/go/+/327169
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
Trust: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cuong Manh Le <cuong.manhle.vn@gmail.com>
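In sketch form, the layout change works like this: the writer appends the experimental payload immediately after the legacy indexed export data and announces its length in an extra header line; old readers skip header lines they don't recognize and stop reading at the end of the self-delimiting legacy data, so they never see the new bytes. The Go sketch below is a hypothetical illustration only: writeExportSection is an invented function, and the real object-file framing (archive headers, "$$" delimiters, fingerprint) is elided. The reader-side counterpart is the findExportData change at the bottom of this diff.

package main

import (
	"bytes"
	"fmt"
)

// writeExportSection is a hypothetical sketch of the new layout: an
// optional "newexportsize" header line, then the legacy indexed
// export data, then the experimental payload appended after it.
func writeExportSection(legacy, experimental []byte) []byte {
	var buf bytes.Buffer
	if len(experimental) > 0 {
		// New readers use this line to locate the trailing payload;
		// old readers ignore header lines they don't recognize.
		fmt.Fprintf(&buf, "newexportsize %d\n", len(experimental))
	}
	buf.Write(legacy)       // existing indexed format (begins with 'i')
	buf.Write(experimental) // trailing bytes; legacy importers never read this far
	return buf.Bytes()
}

func main() {
	out := writeExportSection([]byte("i...legacy..."), []byte("...unified IR..."))
	fmt.Printf("%q\n", out)
}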
Diffstat (limited to 'src/cmd/compile/internal/noder/import.go')
-rw-r--r--	src/cmd/compile/internal/noder/import.go	87
1 file changed, 64 insertions(+), 23 deletions(-)
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
index 08e3f77b66..48f0e48028 100644
--- a/src/cmd/compile/internal/noder/import.go
+++ b/src/cmd/compile/internal/noder/import.go
@@ -31,6 +31,22 @@ import (
 	"cmd/internal/src"
 )
 
+// haveLegacyImports records whether we've imported any packages
+// without a new export data section. This is useful for experimenting
+// with new export data format designs, when you need to support
+// existing tests that manually compile files with inconsistent
+// compiler flags.
+var haveLegacyImports = false
+
+// newReadImportFunc is an extension hook for experimenting with new
+// export data formats. If a new export data payload was written out
+// for an imported package by overloading writeNewExportFunc, then
+// that payload will be mapped into memory and passed to
+// newReadImportFunc.
+var newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
+	panic("unexpected new export data payload")
+}
+
 type gcimports struct {
 	check    *types2.Checker
 	packages map[string]*types2.Package
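The default hook panics because it should be unreachable: it only runs when a "newexportsize" payload exists, and nothing in the base compiler writes one. Below is a hedged sketch of how an experiment like unified IR might install its own reader at initialization time; useUnifiedIR and readUnifiedPackage are invented names, not part of this CL:

func init() {
	if useUnifiedIR { // hypothetical feature gate
		newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (*types2.Package, error) {
			// Decode the experimental payload, populating the
			// types1 package (pkg1) and returning its types2
			// counterpart for the new type checker.
			return readUnifiedPackage(data, pkg1, check, packages) // hypothetical
		}
	}
}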
@@ -245,7 +261,7 @@ func readImportFile(path string, target *ir.Package, check *types2.Checker, pack
 	}
 	defer f.Close()
 
-	r, end, err := findExportData(f)
+	r, end, newsize, err := findExportData(f)
 	if err != nil {
 		return
 	}
@@ -254,34 +270,51 @@ func readImportFile(path string, target *ir.Package, check *types2.Checker, pack
 		fmt.Printf("importing %s (%s)\n", path, f.Name())
 	}
 
-	var c byte
-	switch c, err = r.ReadByte(); {
-	case err != nil:
-		return
+	if newsize != 0 {
+		// We have unified IR data. Map it, and feed to the importers.
+		end -= newsize
+		var data string
+		data, err = base.MapFile(r.File(), end, newsize)
+		if err != nil {
+			return
+		}
 
-	case c != 'i':
-		// Indexed format is distinguished by an 'i' byte,
-		// whereas previous export formats started with 'c', 'd', or 'v'.
-		err = fmt.Errorf("unexpected package format byte: %v", c)
-		return
-	}
+		pkg2, err = newReadImportFunc(data, pkg1, check, packages)
+	} else {
+		// We only have old data. Oh well, fall back to the legacy importers.
+		haveLegacyImports = true
 
-	// Map string (and data) section into memory as a single large
-	// string. This reduces heap fragmentation and allows
-	// returning individual substrings very efficiently.
-	pos := r.Offset()
-	data, err := base.MapFile(r.File(), pos, end-pos)
-	if err != nil {
-		return
-	}
+		var c byte
+		switch c, err = r.ReadByte(); {
+		case err != nil:
+			return
+
+		case c != 'i':
+			// Indexed format is distinguished by an 'i' byte,
+			// whereas previous export formats started with 'c', 'd', or 'v'.
+			err = fmt.Errorf("unexpected package format byte: %v", c)
+			return
+		}
 
-	typecheck.ReadImports(pkg1, data)
+		pos := r.Offset()
 
-	if packages != nil {
-		pkg2, err = importer.ImportData(packages, data, path)
+		// Map string (and data) section into memory as a single large
+		// string. This reduces heap fragmentation and allows
+		// returning individual substrings very efficiently.
+		var data string
+		data, err = base.MapFile(r.File(), pos, end-pos)
 		if err != nil {
 			return
 		}
+
+		typecheck.ReadImports(pkg1, data)
+
+		if packages != nil {
+			pkg2, err = importer.ImportData(packages, data, path)
+			if err != nil {
+				return
+			}
+		}
 	}
 
 	err = addFingerprint(path, f, end)
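To make the new-format branch's offset arithmetic concrete, here is a worked example with invented numbers: if findExportData reports end = 10000 and the header declared newexportsize 2000, then the experimental payload occupies the final 2000 bytes of the export section.

package main

import "fmt"

func main() {
	// Invented numbers illustrating the branch above.
	end, newsize := int64(10000), int64(2000)
	end -= newsize // legacy data now ends at offset 8000
	// base.MapFile(r.File(), end, newsize) would map bytes
	// [8000, 10000), i.e. exactly the appended payload; the
	// adjusted end is what addFingerprint receives afterwards.
	fmt.Println(end, newsize) // 8000 2000
}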
@@ -291,7 +324,7 @@ func readImportFile(path string, target *ir.Package, check *types2.Checker, pack
 // findExportData returns a *bio.Reader positioned at the start of the
 // binary export data section, and a file offset for where to stop
 // reading.
-func findExportData(f *os.File) (r *bio.Reader, end int64, err error) {
+func findExportData(f *os.File) (r *bio.Reader, end, newsize int64, err error) {
 	r = bio.NewReader(f)
 
 	// check object header
@@ -334,6 +367,14 @@ func findExportData(f *os.File) (r *bio.Reader, end int64, err error) {
 	// process header lines
 	for !strings.HasPrefix(line, "$$") {
+		if strings.HasPrefix(line, "newexportsize ") {
+			fields := strings.Fields(line)
+			newsize, err = strconv.ParseInt(fields[1], 10, 64)
+			if err != nil {
+				return
+			}
+		}
+
 		line, err = r.ReadString('\n')
 		if err != nil {
 			return
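The header scan above is the reader-side half of the handshake: while looking for the "$$" delimiter, findExportData now also picks up the payload size from any "newexportsize" line it passes. For illustration, the same parsing logic as a self-contained program runnable outside the compiler; parseNewExportSize is an invented name, whereas in the real code the logic sits inline in the loop above:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseNewExportSize extracts the payload length from a header line
// of the form "newexportsize <n>".
func parseNewExportSize(line string) (n int64, ok bool) {
	if !strings.HasPrefix(line, "newexportsize ") {
		return 0, false
	}
	fields := strings.Fields(line)
	if len(fields) != 2 {
		return 0, false
	}
	n, err := strconv.ParseInt(fields[1], 10, 64)
	if err != nil {
		return 0, false
	}
	return n, true
}

func main() {
	fmt.Println(parseNewExportSize("newexportsize 2048"))    // 2048 true
	fmt.Println(parseNewExportSize("go object linux amd64")) // 0 false
}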