author     Michael Matloob <matloob@golang.org>  2015-11-11 12:39:30 -0500
committer  Michael Matloob <matloob@golang.org>  2015-11-12 17:04:45 +0000
commit     432cb66f16b2bb6a167725057168bbe4aefe5fb5 (patch)
tree       0a6aaf45df2810dc7276212496a4b647ed0bb6d2
parent     b5a0c67fcc2f87b5e2fd04e023f9a0b2f3d759da (diff)
runtime: break out system-specific constants into package sys
runtime/internal/sys will hold system-, architecture- and config-specific constants.

Updates #11647

Change-Id: I6db29c312556087a42e8d2bdd9af40d157c56b54
Reviewed-on: https://go-review.googlesource.com/16817
Reviewed-by: Russ Cox <rsc@golang.org>
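The bulk of the diff below is mechanical: unexported runtime constants such as ptrSize, minFrameSize, thechar and _BigEndian become exported constants in the new runtime/internal/sys package (sys.PtrSize, sys.MinFrameSize, sys.TheChar, sys.BigEndian). A minimal sketch of the resulting usage pattern, using only names introduced in this commit (the wordsOf helper itself is hypothetical):

package runtime

import "runtime/internal/sys"

// Formerly written as size/ptrSize with an unexported constant; the value
// now comes from the shared package: 4 on 32-bit targets, 8 on 64-bit ones.
func wordsOf(size uintptr) uintptr {
	return size / sys.PtrSize
}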
-rw-r--r--  .gitignore | 2
-rw-r--r--  src/cmd/compile/internal/gc/racewalk.go | 2
-rw-r--r--  src/cmd/dist/build.go | 2
-rw-r--r--  src/cmd/dist/buildruntime.go | 22
-rw-r--r--  src/cmd/dist/deps.go | 103
-rw-r--r--  src/go/build/deps_test.go | 3
-rw-r--r--  src/runtime/alg.go | 13
-rw-r--r--  src/runtime/arch_386.go | 19
-rw-r--r--  src/runtime/arch_amd64.go | 19
-rw-r--r--  src/runtime/arch_amd64p32.go | 19
-rw-r--r--  src/runtime/arch_arm.go | 19
-rw-r--r--  src/runtime/arch_arm64.go | 19
-rw-r--r--  src/runtime/arch_ppc64.go | 19
-rw-r--r--  src/runtime/arch_ppc64le.go | 19
-rw-r--r--  src/runtime/cgocall.go | 25
-rw-r--r--  src/runtime/export_test.go | 7
-rw-r--r--  src/runtime/extern.go | 10
-rw-r--r--  src/runtime/hashmap.go | 25
-rw-r--r--  src/runtime/hashmap_fast.go | 37
-rw-r--r--  src/runtime/heapdump.go | 25
-rw-r--r--  src/runtime/iface.go | 5
-rw-r--r--  src/runtime/internal/sys/arch_386.go | 19
-rw-r--r--  src/runtime/internal/sys/arch_amd64.go | 19
-rw-r--r--  src/runtime/internal/sys/arch_amd64p32.go | 19
-rw-r--r--  src/runtime/internal/sys/arch_arm.go | 19
-rw-r--r--  src/runtime/internal/sys/arch_arm64.go | 19
-rw-r--r--  src/runtime/internal/sys/arch_ppc64.go | 19
-rw-r--r--  src/runtime/internal/sys/arch_ppc64le.go | 19
-rw-r--r--  src/runtime/internal/sys/gengoos.go (renamed from src/runtime/gengoos.go) | 14
-rw-r--r--  src/runtime/internal/sys/stubs.go | 11
-rw-r--r--  src/runtime/internal/sys/sys.go | 15
-rw-r--r--  src/runtime/internal/sys/zgoarch_386.go | 26
-rw-r--r--  src/runtime/internal/sys/zgoarch_amd64.go | 26
-rw-r--r--  src/runtime/internal/sys/zgoarch_amd64p32.go | 26
-rw-r--r--  src/runtime/internal/sys/zgoarch_arm.go | 26
-rw-r--r--  src/runtime/internal/sys/zgoarch_arm64.go | 26
-rw-r--r--  src/runtime/internal/sys/zgoarch_ppc64.go | 26
-rw-r--r--  src/runtime/internal/sys/zgoarch_ppc64le.go | 26
-rw-r--r--  src/runtime/internal/sys/zgoos_android.go | 17
-rw-r--r--  src/runtime/internal/sys/zgoos_darwin.go | 17
-rw-r--r--  src/runtime/internal/sys/zgoos_dragonfly.go | 17
-rw-r--r--  src/runtime/internal/sys/zgoos_freebsd.go | 17
-rw-r--r--  src/runtime/internal/sys/zgoos_linux.go | 19
-rw-r--r--  src/runtime/internal/sys/zgoos_nacl.go | 17
-rw-r--r--  src/runtime/internal/sys/zgoos_netbsd.go | 17
-rw-r--r--  src/runtime/internal/sys/zgoos_openbsd.go | 17
-rw-r--r--  src/runtime/internal/sys/zgoos_plan9.go | 17
-rw-r--r--  src/runtime/internal/sys/zgoos_solaris.go | 17
-rw-r--r--  src/runtime/internal/sys/zgoos_windows.go | 17
-rw-r--r--  src/runtime/malloc.go | 25
-rw-r--r--  src/runtime/mbarrier.go | 17
-rw-r--r--  src/runtime/mbitmap.go | 147
-rw-r--r--  src/runtime/mem_bsd.go | 7
-rw-r--r--  src/runtime/mem_linux.go | 25
-rw-r--r--  src/runtime/mfinal.go | 21
-rw-r--r--  src/runtime/mgc.go | 11
-rw-r--r--  src/runtime/mgcmark.go | 33
-rw-r--r--  src/runtime/mgcsweep.go | 9
-rw-r--r--  src/runtime/mgcwork.go | 5
-rw-r--r--  src/runtime/mheap.go | 25
-rw-r--r--  src/runtime/mstats.go | 5
-rw-r--r--  src/runtime/mstkbar.go | 17
-rw-r--r--  src/runtime/os1_freebsd.go | 7
-rw-r--r--  src/runtime/os1_linux.go | 11
-rw-r--r--  src/runtime/os3_plan9.go | 11
-rw-r--r--  src/runtime/os_linux_386.go | 7
-rw-r--r--  src/runtime/os_linux_arm.go | 7
-rw-r--r--  src/runtime/parfor.go | 7
-rw-r--r--  src/runtime/proc.go | 37
-rw-r--r--  src/runtime/runtime1.go | 9
-rw-r--r--  src/runtime/runtime2.go | 15
-rw-r--r--  src/runtime/select.go | 7
-rw-r--r--  src/runtime/sema.go | 3
-rw-r--r--  src/runtime/signal1_unix.go | 4
-rw-r--r--  src/runtime/signal_386.go | 11
-rw-r--r--  src/runtime/signal_amd64x.go | 7
-rw-r--r--  src/runtime/signal_arm64.go | 7
-rw-r--r--  src/runtime/signal_linux_386.go | 7
-rw-r--r--  src/runtime/signal_linux_amd64.go | 7
-rw-r--r--  src/runtime/signal_linux_arm.go | 7
-rw-r--r--  src/runtime/signal_linux_arm64.go | 7
-rw-r--r--  src/runtime/signal_linux_ppc64x.go | 7
-rw-r--r--  src/runtime/signal_ppc64x.go | 7
-rw-r--r--  src/runtime/stack.go | 29
-rw-r--r--  src/runtime/stubs.go | 6
-rw-r--r--  src/runtime/symtab.go | 15
-rw-r--r--  src/runtime/sys_x86.go | 11
-rw-r--r--  src/runtime/trace.go | 9
-rw-r--r--  src/runtime/traceback.go | 35
-rw-r--r--  src/runtime/vdso_linux_amd64.go | 7
-rw-r--r--  src/runtime/zgoos_android.go | 17
-rw-r--r--  src/runtime/zgoos_darwin.go | 17
-rw-r--r--  src/runtime/zgoos_dragonfly.go | 17
-rw-r--r--  src/runtime/zgoos_freebsd.go | 17
-rw-r--r--  src/runtime/zgoos_linux.go | 19
-rw-r--r--  src/runtime/zgoos_nacl.go | 17
-rw-r--r--  src/runtime/zgoos_netbsd.go | 17
-rw-r--r--  src/runtime/zgoos_openbsd.go | 17
-rw-r--r--  src/runtime/zgoos_plan9.go | 17
-rw-r--r--  src/runtime/zgoos_solaris.go | 17
-rw-r--r--  src/runtime/zgoos_windows.go | 17
101 files changed, 1049 insertions, 749 deletions
diff --git a/.gitignore b/.gitignore
index 6a3ce32ec8..de48481bf1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,7 +29,7 @@ src/cmd/cgo/zdefaultcc.go
src/cmd/go/zdefaultcc.go
src/cmd/internal/obj/zbootstrap.go
src/go/doc/headscan
-src/runtime/zversion.go
+src/runtime/internal/sys/zversion.go
src/unicode/maketables
src/*.*/
test/pass.out
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go
index 989d3596da..35e06b9e7e 100644
--- a/src/cmd/compile/internal/gc/racewalk.go
+++ b/src/cmd/compile/internal/gc/racewalk.go
@@ -31,7 +31,7 @@ import (
// Do not instrument the following packages at all,
// at best instrumentation would cause infinite recursion.
-var omit_pkgs = []string{"runtime/internal/atomic", "runtime", "runtime/race", "runtime/msan"}
+var omit_pkgs = []string{"runtime/internal/atomic", "runtime/internal/sys", "runtime", "runtime/race", "runtime/msan"}
// Only insert racefuncenter/racefuncexit into the following packages.
// Memory accesses in the packages are either uninteresting or will cause false positives.
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index a30cd862b5..3fdefbc890 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -463,7 +463,7 @@ var deptab = []struct {
{"cmd/go", []string{
"zdefaultcc.go",
}},
- {"runtime", []string{
+ {"runtime/internal/sys", []string{
"zversion.go",
}},
}
diff --git a/src/cmd/dist/buildruntime.go b/src/cmd/dist/buildruntime.go
index 1e7b4a7f62..c1a9b817f8 100644
--- a/src/cmd/dist/buildruntime.go
+++ b/src/cmd/dist/buildruntime.go
@@ -17,23 +17,23 @@ import (
// mkzversion writes zversion.go:
//
// package runtime
-// const defaultGoroot = <goroot>
-// const theVersion = <version>
-// const goexperiment = <goexperiment>
-// const stackGuardMultiplier = <multiplier value>
-// const buildVersion = <build version>
+// const DefaultGoroot = <goroot>
+// const TheVersion = <version>
+// const Goexperiment = <goexperiment>
+// const StackGuardMultiplier = <multiplier value>
+// const BuildVersion = <build version>
//
func mkzversion(dir, file string) {
out := fmt.Sprintf(
"// auto generated by go tool dist\n"+
"\n"+
- "package runtime\n"+
+ "package sys\n"+
"\n"+
- "const defaultGoroot = `%s`\n"+
- "const theVersion = `%s`\n"+
- "const goexperiment = `%s`\n"+
- "const stackGuardMultiplier = %d\n\n"+
- "var buildVersion = theVersion\n", goroot_final, findgoversion(), os.Getenv("GOEXPERIMENT"), stackGuardMultiplier())
+ "const DefaultGoroot = `%s`\n"+
+ "const TheVersion = `%s`\n"+
+ "const Goexperiment = `%s`\n"+
+ "const StackGuardMultiplier = %d\n\n"+
+ "var BuildVersion = TheVersion\n", goroot_final, findgoversion(), os.Getenv("GOEXPERIMENT"), stackGuardMultiplier())
writefile(out, file, writeSkipSame)
}
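For illustration, a zversion.go emitted by the new mkzversion would now look roughly like the following; the GOROOT, version, experiment and multiplier values here are placeholders, not taken from any real build:

// auto generated by go tool dist

package sys

const DefaultGoroot = `/usr/local/go`
const TheVersion = `devel +432cb66f16b2`
const Goexperiment = ``
const StackGuardMultiplier = 1

var BuildVersion = TheVersion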
diff --git a/src/cmd/dist/deps.go b/src/cmd/dist/deps.go
index 3859977607..4e20b5600b 100644
--- a/src/cmd/dist/deps.go
+++ b/src/cmd/dist/deps.go
@@ -3,56 +3,57 @@
package main
var builddeps = map[string][]string{
- "bufio": {"bytes", "errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "bytes": {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "container/heap": {"runtime", "runtime/internal/atomic", "sort"},
- "crypto": {"errors", "hash", "io", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
- "crypto/sha1": {"crypto", "errors", "hash", "io", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
- "debug/dwarf": {"encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "debug/elf": {"bytes", "debug/dwarf", "encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "encoding": {"runtime", "runtime/internal/atomic"},
- "encoding/base64": {"errors", "io", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
- "encoding/binary": {"errors", "io", "math", "reflect", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
- "encoding/json": {"bytes", "encoding", "encoding/base64", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "errors": {"runtime", "runtime/internal/atomic"},
- "flag": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "fmt": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "go/ast": {"bytes", "errors", "fmt", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "go/build": {"bufio", "bytes", "errors", "fmt", "go/ast", "go/doc", "go/parser", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "go/doc": {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "go/parser": {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "go/scanner": {"bytes", "errors", "fmt", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "go/token": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "hash": {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic"},
- "internal/singleflight": {"runtime", "runtime/internal/atomic", "sync", "sync/atomic"},
- "internal/syscall/windows": {"errors", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "unicode/utf16"},
- "internal/syscall/windows/registry": {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "unicode/utf16"},
- "io": {"errors", "runtime", "runtime/internal/atomic", "sync", "sync/atomic"},
- "io/ioutil": {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "log": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "math": {"runtime", "runtime/internal/atomic"},
- "net/url": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "os": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "os/exec": {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "os/signal": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "path": {"errors", "io", "runtime", "runtime/internal/atomic", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "path/filepath": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "runtime/internal/atomic", "sort", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "reflect": {"errors", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
- "regexp": {"bytes", "errors", "io", "math", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "regexp/syntax": {"bytes", "errors", "io", "math", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "runtime": {"runtime/internal/atomic"},
+ "bufio": {"bytes", "errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "bytes": {"errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "container/heap": {"runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort"},
+ "crypto": {"errors", "hash", "io", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+ "crypto/sha1": {"crypto", "errors", "hash", "io", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+ "debug/dwarf": {"encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "debug/elf": {"bytes", "debug/dwarf", "encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "encoding": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+ "encoding/base64": {"errors", "io", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+ "encoding/binary": {"errors", "io", "math", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+ "encoding/json": {"bytes", "encoding", "encoding/base64", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "errors": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+ "flag": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "fmt": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "go/ast": {"bytes", "errors", "fmt", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "go/build": {"bufio", "bytes", "errors", "fmt", "go/ast", "go/doc", "go/parser", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "go/doc": {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "go/parser": {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "go/scanner": {"bytes", "errors", "fmt", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "go/token": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "hash": {"errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic"},
+ "internal/singleflight": {"runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic"},
+ "internal/syscall/windows": {"errors", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "unicode/utf16"},
+ "internal/syscall/windows/registry": {"errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "unicode/utf16"},
+ "io": {"errors", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic"},
+ "io/ioutil": {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "log": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "math": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+ "net/url": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "os": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "os/exec": {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "os/signal": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "path": {"errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "path/filepath": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "reflect": {"errors", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+ "regexp": {"bytes", "errors", "io", "math", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "regexp/syntax": {"bytes", "errors", "io", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "runtime": {"runtime/internal/atomic", "runtime/internal/sys"},
"runtime/internal/atomic": {},
- "sort": {"runtime", "runtime/internal/atomic"},
- "strconv": {"errors", "math", "runtime", "runtime/internal/atomic", "unicode/utf8"},
- "strings": {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "sync": {"runtime", "runtime/internal/atomic", "sync/atomic"},
- "sync/atomic": {"runtime", "runtime/internal/atomic"},
- "syscall": {"errors", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode/utf16"},
- "text/template": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "text/template/parse": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "time": {"errors", "internal/syscall/windows/registry", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "unicode/utf16"},
- "unicode": {"runtime", "runtime/internal/atomic"},
- "unicode/utf16": {"runtime", "runtime/internal/atomic"},
- "unicode/utf8": {"runtime", "runtime/internal/atomic"},
- "cmd/go": {"bufio", "bytes", "container/heap", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "encoding", "encoding/base64", "encoding/binary", "encoding/json", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "internal/singleflight", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "runtime/internal/sys": {},
+ "sort": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+ "strconv": {"errors", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "unicode/utf8"},
+ "strings": {"errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "sync": {"runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync/atomic"},
+ "sync/atomic": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+ "syscall": {"errors", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode/utf16"},
+ "text/template": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "text/template/parse": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "time": {"errors", "internal/syscall/windows/registry", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "unicode/utf16"},
+ "unicode": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+ "unicode/utf16": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+ "unicode/utf8": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+ "cmd/go": {"bufio", "bytes", "container/heap", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "encoding", "encoding/base64", "encoding/binary", "encoding/json", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "internal/singleflight", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
}
diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index c3aba7c584..4575b44260 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -36,7 +36,8 @@ var pkgDeps = map[string][]string{
// L0 is the lowest level, core, nearly unavoidable packages.
"errors": {},
"io": {"errors", "sync"},
- "runtime": {"unsafe", "runtime/internal/atomic"},
+ "runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys"},
+ "runtime/internal/sys": {},
"runtime/internal/atomic": {"unsafe"},
"sync": {"runtime", "sync/atomic", "unsafe"},
"sync/atomic": {"unsafe"},
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index 95173495c3..9ea0eb0187 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -4,11 +4,14 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
const (
- c0 = uintptr((8-ptrSize)/4*2860486313 + (ptrSize-4)/4*33054211828000289)
- c1 = uintptr((8-ptrSize)/4*3267000013 + (ptrSize-4)/4*23344194077549503)
+ c0 = uintptr((8-sys.PtrSize)/4*2860486313 + (sys.PtrSize-4)/4*33054211828000289)
+ c1 = uintptr((8-sys.PtrSize)/4*3267000013 + (sys.PtrSize-4)/4*23344194077549503)
)
// type algorithms - known to compiler
@@ -301,7 +304,7 @@ func memclrBytes(b []byte) {
memclr(s.array, uintptr(s.len))
}
-const hashRandomBytes = ptrSize / 4 * 64
+const hashRandomBytes = sys.PtrSize / 4 * 64
// used in asm_{386,amd64}.s to seed the hash function
var aeskeysched [hashRandomBytes]byte
@@ -324,7 +327,7 @@ func init() {
getRandomData(aeskeysched[:])
return
}
- getRandomData((*[len(hashkey) * ptrSize]byte)(unsafe.Pointer(&hashkey))[:])
+ getRandomData((*[len(hashkey) * sys.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
hashkey[0] |= 1 // make sure these numbers are odd
hashkey[1] |= 1
hashkey[2] |= 1
diff --git a/src/runtime/arch_386.go b/src/runtime/arch_386.go
deleted file mode 100644
index 75e94eccfd..0000000000
--- a/src/runtime/arch_386.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
- thechar = '8'
- _BigEndian = 0
- _CacheLineSize = 64
- _PhysPageSize = goos_nacl*65536 + (1-goos_nacl)*4096 // 4k normally; 64k on NaCl
- _PCQuantum = 1
- _Int64Align = 4
- hugePageSize = 1 << 21
- minFrameSize = 0
-)
-
-type uintreg uint32
-type intptr int32 // TODO(rsc): remove
diff --git a/src/runtime/arch_amd64.go b/src/runtime/arch_amd64.go
deleted file mode 100644
index d7721f74a1..0000000000
--- a/src/runtime/arch_amd64.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
- thechar = '6'
- _BigEndian = 0
- _CacheLineSize = 64
- _PhysPageSize = 4096
- _PCQuantum = 1
- _Int64Align = 8
- hugePageSize = 1 << 21
- minFrameSize = 0
-)
-
-type uintreg uint64
-type intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/arch_amd64p32.go b/src/runtime/arch_amd64p32.go
deleted file mode 100644
index aa8343ac22..0000000000
--- a/src/runtime/arch_amd64p32.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
- thechar = '6'
- _BigEndian = 0
- _CacheLineSize = 64
- _PhysPageSize = 65536*goos_nacl + 4096*(1-goos_nacl)
- _PCQuantum = 1
- _Int64Align = 8
- hugePageSize = 1 << 21
- minFrameSize = 0
-)
-
-type uintreg uint64
-type intptr int32 // TODO(rsc): remove
diff --git a/src/runtime/arch_arm.go b/src/runtime/arch_arm.go
deleted file mode 100644
index aa3e180c57..0000000000
--- a/src/runtime/arch_arm.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
- thechar = '5'
- _BigEndian = 0
- _CacheLineSize = 32
- _PhysPageSize = 65536*goos_nacl + 4096*(1-goos_nacl)
- _PCQuantum = 4
- _Int64Align = 4
- hugePageSize = 0
- minFrameSize = 4
-)
-
-type uintreg uint32
-type intptr int32 // TODO(rsc): remove
diff --git a/src/runtime/arch_arm64.go b/src/runtime/arch_arm64.go
deleted file mode 100644
index f01c26d5ae..0000000000
--- a/src/runtime/arch_arm64.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
- thechar = '7'
- _BigEndian = 0
- _CacheLineSize = 32
- _PhysPageSize = 65536
- _PCQuantum = 4
- _Int64Align = 8
- hugePageSize = 0
- minFrameSize = 8
-)
-
-type uintreg uint64
-type intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/arch_ppc64.go b/src/runtime/arch_ppc64.go
deleted file mode 100644
index 273cc564ed..0000000000
--- a/src/runtime/arch_ppc64.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
- thechar = '9'
- _BigEndian = 1
- _CacheLineSize = 64
- _PhysPageSize = 65536
- _PCQuantum = 4
- _Int64Align = 8
- hugePageSize = 0
- minFrameSize = 8
-)
-
-type uintreg uint64
-type intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/arch_ppc64le.go b/src/runtime/arch_ppc64le.go
deleted file mode 100644
index e4eb9e5d8a..0000000000
--- a/src/runtime/arch_ppc64le.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
- thechar = '9'
- _BigEndian = 0
- _CacheLineSize = 64
- _PhysPageSize = 65536
- _PCQuantum = 4
- _Int64Align = 8
- hugePageSize = 0
- minFrameSize = 8
-)
-
-type uintreg uint64
-type intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index a01548a32f..08be142785 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -79,7 +79,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
// Call from Go to C.
//go:nosplit
@@ -220,22 +223,22 @@ func cgocallbackg1() {
case "arm":
// On arm, stack frame is two words and there's a saved LR between
// SP and the stack frame and between the stack frame and the arguments.
- cb = (*args)(unsafe.Pointer(sp + 4*ptrSize))
+ cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
case "arm64":
// On arm64, stack frame is four words and there's a saved LR between
// SP and the stack frame and between the stack frame and the arguments.
- cb = (*args)(unsafe.Pointer(sp + 5*ptrSize))
+ cb = (*args)(unsafe.Pointer(sp + 5*sys.PtrSize))
case "amd64":
// On amd64, stack frame is one word, plus caller PC.
if framepointer_enabled {
// In this case, there's also saved BP.
- cb = (*args)(unsafe.Pointer(sp + 3*ptrSize))
+ cb = (*args)(unsafe.Pointer(sp + 3*sys.PtrSize))
break
}
- cb = (*args)(unsafe.Pointer(sp + 2*ptrSize))
+ cb = (*args)(unsafe.Pointer(sp + 2*sys.PtrSize))
case "386":
// On 386, stack frame is three words, plus caller PC.
- cb = (*args)(unsafe.Pointer(sp + 4*ptrSize))
+ cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
case "ppc64", "ppc64le":
// On ppc64, the callback arguments are in the arguments area of
// cgocallback's stack frame. The stack looks like this:
@@ -252,7 +255,7 @@ func cgocallbackg1() {
// | cgocallback_gofunc +------------------------------+ <- sp + minFrameSize
// | | fixed frame area |
// +--------------------+------------------------------+ <- sp
- cb = (*args)(unsafe.Pointer(sp + 2*minFrameSize + 2*ptrSize))
+ cb = (*args)(unsafe.Pointer(sp + 2*sys.MinFrameSize + 2*sys.PtrSize))
}
// Invoke callback.
@@ -291,7 +294,7 @@ func unwindm(restore *bool) {
default:
throw("unwindm not implemented")
case "386", "amd64", "arm", "ppc64", "ppc64le":
- sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + minFrameSize))
+ sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
case "arm64":
sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
}
@@ -437,7 +440,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool) {
if inheap(uintptr(unsafe.Pointer(it))) {
panic(errorString(cgoCheckPointerFail))
}
- p = *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + ptrSize))
+ p = *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + sys.PtrSize))
if !cgoIsGoPointer(p) {
return
}
@@ -505,9 +508,9 @@ func cgoCheckUnknownPointer(p unsafe.Pointer) {
return
}
n := span.elemsize
- for i := uintptr(0); i < n; i += ptrSize {
+ for i := uintptr(0); i < n; i += sys.PtrSize {
bits := hbits.bits()
- if i >= 2*ptrSize && bits&bitMarked == 0 {
+ if i >= 2*sys.PtrSize && bits&bitMarked == 0 {
// No more possible pointers.
break
}
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index ad2bf1c628..6a4eae607a 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -8,6 +8,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -115,7 +116,7 @@ func GostringW(w []uint16) (s string) {
var Gostringnocopy = gostringnocopy
var Maxstring = &maxstring
-type Uintreg uintreg
+type Uintreg sys.Uintreg
var Open = open
var Close = closefd
@@ -125,7 +126,7 @@ var Write = write
func Envs() []string { return envs }
func SetEnvs(e []string) { envs = e }
-var BigEndian = _BigEndian
+var BigEndian = sys.BigEndian
// For benchmarking.
@@ -156,7 +157,7 @@ func BenchSetType(n int, x interface{}) {
})
}
-const PtrSize = ptrSize
+const PtrSize = sys.PtrSize
var TestingAssertE2I2GC = &testingAssertE2I2GC
var TestingAssertE2T2GC = &testingAssertE2T2GC
diff --git a/src/runtime/extern.go b/src/runtime/extern.go
index 5a5d432f62..564318c7cd 100644
--- a/src/runtime/extern.go
+++ b/src/runtime/extern.go
@@ -130,6 +130,8 @@ of the run-time system.
*/
package runtime
+import "runtime/internal/sys"
+
// Caller reports file and line number information about function invocations on
// the calling goroutine's stack. The argument skip is the number of stack frames
// to ascend, with 0 identifying the caller of Caller. (For historical reasons the
@@ -199,20 +201,20 @@ func GOROOT() string {
if s != "" {
return s
}
- return defaultGoroot
+ return sys.DefaultGoroot
}
// Version returns the Go tree's version string.
// It is either the commit hash and date at the time of the build or,
// when possible, a release tag like "go1.3".
func Version() string {
- return theVersion
+ return sys.TheVersion
}
// GOOS is the running program's operating system target:
// one of darwin, freebsd, linux, and so on.
-const GOOS string = theGoos
+const GOOS string = sys.TheGoos
// GOARCH is the running program's architecture target:
// 386, amd64, or arm.
-const GOARCH string = theGoarch
+const GOARCH string = sys.TheGoarch
diff --git a/src/runtime/hashmap.go b/src/runtime/hashmap.go
index 667367891c..056396c518 100644
--- a/src/runtime/hashmap.go
+++ b/src/runtime/hashmap.go
@@ -55,6 +55,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -96,7 +97,7 @@ const (
oldIterator = 2 // there may be an iterator using oldbuckets
// sentinel bucket ID for iterator checks
- noCheck = 1<<(8*ptrSize) - 1
+ noCheck = 1<<(8*sys.PtrSize) - 1
)
// A header for a Go map.
@@ -160,7 +161,7 @@ func evacuated(b *bmap) bool {
}
func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-ptrSize))
+ return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}
func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
@@ -168,7 +169,7 @@ func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
h.createOverflow()
*h.overflow[0] = append(*h.overflow[0], ovf)
}
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-ptrSize)) = ovf
+ *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
}
func (h *hmap) createOverflow() {
@@ -201,11 +202,11 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
}
// check compiler's and reflect's math
- if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
+ if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
throw("key size wrong")
}
- if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
+ if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
throw("value size wrong")
}
@@ -293,7 +294,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
b = oldb
}
}
- top := uint8(hash >> (ptrSize*8 - 8))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
@@ -344,7 +345,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
b = oldb
}
}
- top := uint8(hash >> (ptrSize*8 - 8))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
@@ -387,7 +388,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
b = oldb
}
}
- top := uint8(hash >> (ptrSize*8 - 8))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
@@ -444,7 +445,7 @@ again:
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
- top := uint8(hash >> (ptrSize*8 - 8))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
@@ -541,7 +542,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
- top := uint8(hash >> (ptrSize*8 - 8))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
@@ -594,7 +595,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
return
}
- if unsafe.Sizeof(hiter{})/ptrSize != 12 {
+ if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
}
it.t = t
@@ -865,7 +866,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
} else {
hash &^= newbit
}
- top = uint8(hash >> (ptrSize*8 - 8))
+ top = uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
diff --git a/src/runtime/hashmap_fast.go b/src/runtime/hashmap_fast.go
index 9f310f8bf3..454256381f 100644
--- a/src/runtime/hashmap_fast.go
+++ b/src/runtime/hashmap_fast.go
@@ -6,6 +6,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -196,12 +197,12 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if x == empty {
continue
}
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
}
}
return atomic.Loadp(unsafe.Pointer(&zeroptr))
@@ -213,12 +214,12 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if x == empty {
continue
}
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
}
// check first 4 bytes
// TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
@@ -237,9 +238,9 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
keymaybe = i
}
if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
if memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
}
}
return atomic.Loadp(unsafe.Pointer(&zeroptr))
@@ -254,7 +255,7 @@ dohash:
b = oldb
}
}
- top := uint8(hash >> (ptrSize*8 - 8))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
@@ -264,12 +265,12 @@ dohash:
if x != top {
continue
}
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
}
}
b = b.overflow(t)
@@ -298,12 +299,12 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if x == empty {
continue
}
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
}
}
return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
@@ -315,12 +316,12 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if x == empty {
continue
}
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@@ -337,9 +338,9 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
keymaybe = i
}
if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
if memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
}
}
return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
@@ -354,7 +355,7 @@ dohash:
b = oldb
}
}
- top := uint8(hash >> (ptrSize*8 - 8))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
@@ -364,12 +365,12 @@ dohash:
if x != top {
continue
}
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
}
}
b = b.overflow(t)
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 0a62c6731d..dfceba3376 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -11,7 +11,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
@@ -233,7 +236,7 @@ func dumpbv(cbv *bitvector, offset uintptr) {
for i := uintptr(0); i < uintptr(bv.n); i++ {
if bv.bytedata[i/8]>>(i%8)&1 == 1 {
dumpint(fieldKindPtr)
- dumpint(uint64(offset + i*ptrSize))
+ dumpint(uint64(offset + i*sys.PtrSize))
}
}
}
@@ -263,7 +266,7 @@ func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
var bv bitvector
if stkmap != nil && stkmap.n > 0 {
bv = stackmapdata(stkmap, pcdata)
- dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n*ptrSize)))
+ dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n*sys.PtrSize)))
} else {
bv.n = -1
}
@@ -288,7 +291,7 @@ func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
dumpbv(&child.args, child.argoff)
} else {
// conservative - everything might be a pointer
- for off := child.argoff; off < child.argoff+child.arglen; off += ptrSize {
+ for off := child.argoff; off < child.argoff+child.arglen; off += sys.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
@@ -297,21 +300,21 @@ func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
// Dump fields in the local vars section
if stkmap == nil {
// No locals information, dump everything.
- for off := child.arglen; off < s.varp-s.sp; off += ptrSize {
+ for off := child.arglen; off < s.varp-s.sp; off += sys.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
} else if stkmap.n < 0 {
// Locals size information, dump just the locals.
size := uintptr(-stkmap.n)
- for off := s.varp - size - s.sp; off < s.varp-s.sp; off += ptrSize {
+ for off := s.varp - size - s.sp; off < s.varp-s.sp; off += sys.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
} else if stkmap.n > 0 {
// Locals bitmap information, scan just the pointers in
// locals.
- dumpbv(&bv, s.varp-uintptr(bv.n)*ptrSize-s.sp)
+ dumpbv(&bv, s.varp-uintptr(bv.n)*sys.PtrSize-s.sp)
}
dumpint(fieldKindEol)
@@ -489,11 +492,11 @@ func dumpparams() {
} else {
dumpbool(true) // big-endian ptrs
}
- dumpint(ptrSize)
+ dumpint(sys.PtrSize)
dumpint(uint64(mheap_.arena_start))
dumpint(uint64(mheap_.arena_used))
- dumpint(thechar)
- dumpstr(goexperiment)
+ dumpint(sys.TheChar)
+ dumpstr(sys.Goexperiment)
dumpint(uint64(ncpu))
}
@@ -704,7 +707,7 @@ func dumpbvtypes(bv *bitvector, base unsafe.Pointer) {
func makeheapobjbv(p uintptr, size uintptr) bitvector {
// Extend the temp buffer if necessary.
- nptr := size / ptrSize
+ nptr := size / sys.PtrSize
if uintptr(len(tmpbuf)) < nptr/8+1 {
if tmpbuf != nil {
sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
diff --git a/src/runtime/iface.go b/src/runtime/iface.go
index d4e8b8e69f..71dc865e07 100644
--- a/src/runtime/iface.go
+++ b/src/runtime/iface.go
@@ -6,6 +6,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -69,7 +70,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
}
}
- m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*ptrSize, 0, &memstats.other_sys))
+ m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys))
m.inter = inter
m._type = typ
@@ -90,7 +91,7 @@ search:
t := &x.mhdr[j]
if t.mtyp == itype && (t.name == iname || *t.name == *iname) && t.pkgpath == ipkgpath {
if m != nil {
- *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*ptrSize)) = t.ifn
+ *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = t.ifn
}
goto nextimethod
}
diff --git a/src/runtime/internal/sys/arch_386.go b/src/runtime/internal/sys/arch_386.go
new file mode 100644
index 0000000000..15c8e840eb
--- /dev/null
+++ b/src/runtime/internal/sys/arch_386.go
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+ TheChar = '8'
+ BigEndian = 0
+ CacheLineSize = 64
+ PhysPageSize = GoosNacl*65536 + (1-GoosNacl)*4096 // 4k normally; 64k on NaCl
+ PCQuantum = 1
+ Int64Align = 4
+ HugePageSize = 1 << 21
+ MinFrameSize = 0
+)
+
+type Uintreg uint32
+type Intptr int32 // TODO(rsc): remove
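PhysPageSize above uses the generated GoosNacl constant as a 0/1 selector, so the page size is chosen at compile time without build tags. A standalone sketch of the idiom (names prefixed with "example" are hypothetical):

package sys

// With the selector at 0 the sum is 4096; at 1 it would be 65536.
const (
	exampleGoosNacl = 0                                                // hypothetical: not building for NaCl
	examplePageSize = exampleGoosNacl*65536 + (1-exampleGoosNacl)*4096 // == 4096 here
)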
diff --git a/src/runtime/internal/sys/arch_amd64.go b/src/runtime/internal/sys/arch_amd64.go
new file mode 100644
index 0000000000..bc9002cc71
--- /dev/null
+++ b/src/runtime/internal/sys/arch_amd64.go
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+ TheChar = '6'
+ BigEndian = 0
+ CacheLineSize = 64
+ PhysPageSize = 4096
+ PCQuantum = 1
+ Int64Align = 8
+ HugePageSize = 1 << 21
+ MinFrameSize = 0
+)
+
+type Uintreg uint64
+type Intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/internal/sys/arch_amd64p32.go b/src/runtime/internal/sys/arch_amd64p32.go
new file mode 100644
index 0000000000..d7c185f168
--- /dev/null
+++ b/src/runtime/internal/sys/arch_amd64p32.go
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+ TheChar = '6'
+ BigEndian = 0
+ CacheLineSize = 64
+ PhysPageSize = 65536*GoosNacl + 4096*(1-GoosNacl)
+ PCQuantum = 1
+ Int64Align = 8
+ HugePageSize = 1 << 21
+ MinFrameSize = 0
+)
+
+type Uintreg uint64
+type Intptr int32 // TODO(rsc): remove
diff --git a/src/runtime/internal/sys/arch_arm.go b/src/runtime/internal/sys/arch_arm.go
new file mode 100644
index 0000000000..d395ac5fa8
--- /dev/null
+++ b/src/runtime/internal/sys/arch_arm.go
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+ TheChar = '5'
+ BigEndian = 0
+ CacheLineSize = 32
+ PhysPageSize = 65536*GoosNacl + 4096*(1-GoosNacl)
+ PCQuantum = 4
+ Int64Align = 4
+ HugePageSize = 0
+ MinFrameSize = 4
+)
+
+type Uintreg uint32
+type Intptr int32 // TODO(rsc): remove
diff --git a/src/runtime/internal/sys/arch_arm64.go b/src/runtime/internal/sys/arch_arm64.go
new file mode 100644
index 0000000000..bd7e41d97e
--- /dev/null
+++ b/src/runtime/internal/sys/arch_arm64.go
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+ TheChar = '7'
+ BigEndian = 0
+ CacheLineSize = 32
+ PhysPageSize = 65536
+ PCQuantum = 4
+ Int64Align = 8
+ HugePageSize = 0
+ MinFrameSize = 8
+)
+
+type Uintreg uint64
+type Intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/internal/sys/arch_ppc64.go b/src/runtime/internal/sys/arch_ppc64.go
new file mode 100644
index 0000000000..9b13415dfc
--- /dev/null
+++ b/src/runtime/internal/sys/arch_ppc64.go
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+ TheChar = '9'
+ BigEndian = 1
+ CacheLineSize = 64
+ PhysPageSize = 65536
+ PCQuantum = 4
+ Int64Align = 8
+ HugePageSize = 0
+ MinFrameSize = 8
+)
+
+type Uintreg uint64
+type Intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/internal/sys/arch_ppc64le.go b/src/runtime/internal/sys/arch_ppc64le.go
new file mode 100644
index 0000000000..db9b2aa32b
--- /dev/null
+++ b/src/runtime/internal/sys/arch_ppc64le.go
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+ TheChar = '9'
+ BigEndian = 0
+ CacheLineSize = 64
+ PhysPageSize = 65536
+ PCQuantum = 4
+ Int64Align = 8
+ HugePageSize = 0
+ MinFrameSize = 8
+)
+
+type Uintreg uint64
+type Intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/gengoos.go b/src/runtime/internal/sys/gengoos.go
index 06621c8dba..55d991cac3 100644
--- a/src/runtime/gengoos.go
+++ b/src/runtime/internal/sys/gengoos.go
@@ -18,7 +18,7 @@ import (
var gooses, goarches []string
func main() {
- data, err := ioutil.ReadFile("../go/build/syslist.go")
+ data, err := ioutil.ReadFile("../../../go/build/syslist.go")
if err != nil {
log.Fatal(err)
}
@@ -49,14 +49,14 @@ func main() {
if target == "linux" {
fmt.Fprintf(&buf, "// +build !android\n\n") // must explicitly exclude android for linux
}
- fmt.Fprintf(&buf, "package runtime\n\n")
- fmt.Fprintf(&buf, "const theGoos = `%s`\n\n", target)
+ fmt.Fprintf(&buf, "package sys\n\n")
+ fmt.Fprintf(&buf, "const TheGoos = `%s`\n\n", target)
for _, goos := range gooses {
value := 0
if goos == target {
value = 1
}
- fmt.Fprintf(&buf, "const goos_%s = %d\n", goos, value)
+ fmt.Fprintf(&buf, "const Goos%s = %d\n", strings.Title(goos), value)
}
err := ioutil.WriteFile("zgoos_"+target+".go", buf.Bytes(), 0666)
if err != nil {
@@ -67,14 +67,14 @@ func main() {
for _, target := range goarches {
var buf bytes.Buffer
fmt.Fprintf(&buf, "// generated by gengoos.go using 'go generate'\n\n")
- fmt.Fprintf(&buf, "package runtime\n\n")
- fmt.Fprintf(&buf, "const theGoarch = `%s`\n\n", target)
+ fmt.Fprintf(&buf, "package sys\n\n")
+ fmt.Fprintf(&buf, "const TheGoarch = `%s`\n\n", target)
for _, goarch := range goarches {
value := 0
if goarch == target {
value = 1
}
- fmt.Fprintf(&buf, "const goarch_%s = %d\n", goarch, value)
+ fmt.Fprintf(&buf, "const Goarch%s = %d\n", strings.Title(goarch), value)
}
err := ioutil.WriteFile("zgoarch_"+target+".go", buf.Bytes(), 0666)
if err != nil {
diff --git a/src/runtime/internal/sys/stubs.go b/src/runtime/internal/sys/stubs.go
new file mode 100644
index 0000000000..0a94502f32
--- /dev/null
+++ b/src/runtime/internal/sys/stubs.go
@@ -0,0 +1,11 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+// Constants describing the target machine word size and stack alignment.
+
+const PtrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
+const RegSize = 4 << (^Uintreg(0) >> 63) // unsafe.Sizeof(Uintreg(0)) but an ideal const
+const SpAlign = 1*(1-GoarchArm64) + 16*GoarchArm64 // SP alignment: 1 normally, 16 for ARM64
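The PtrSize and RegSize expressions above rely on constant arithmetic: ^uintptr(0) is the all-ones word, so shifting it right by 63 yields 1 on a 64-bit target and 0 on a 32-bit one, turning the whole expression into 8 or 4 at compile time. A minimal standalone sketch of the same trick outside the runtime (the names here are illustrative, not part of the patch):

package main

import "fmt"

// wordSize mimics sys.PtrSize: ^uintptr(0) is all ones, so >>63 evaluates to 1
// on a 64-bit platform and 0 on a 32-bit one, selecting 8 or 4 as an ideal constant.
const wordSize = 4 << (^uintptr(0) >> 63)

func main() {
	fmt.Println("pointer size in bytes:", wordSize)
}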
diff --git a/src/runtime/internal/sys/sys.go b/src/runtime/internal/sys/sys.go
new file mode 100644
index 0000000000..15ad7f5d70
--- /dev/null
+++ b/src/runtime/internal/sys/sys.go
@@ -0,0 +1,15 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sys contains system-, configuration- and architecture-specific
+// constants used by the runtime.
+package sys
+
+// The //go:generate line below makes 'go generate' write the
+// zgoos_*.go and zgoarch_*.go files with per-OS and per-arch
+// information, including constants named Goos$GOOS and Goarch$GOARCH
+// for every known GOOS and GOARCH. The constant is 1 on the
+// current system, 0 otherwise; multiplying by them is
+// useful for defining GOOS- or GOARCH-specific constants.
+//go:generate go run gengoos.go
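These 0/1 constants let GOOS- and GOARCH-dependent values be written as ordinary constant arithmetic instead of build-tagged files, which is exactly how arch_amd64p32.go computes PhysPageSize above. A small sketch of the pattern using locally defined stand-ins (runtime/internal/sys cannot be imported from outside the runtime tree, so the constants below are illustrative only):

package main

import "fmt"

// Stand-ins for the generated Goos* constants: exactly one is 1 for the
// target GOOS. The real values live in the zgoos_*.go files of
// runtime/internal/sys.
const (
	goosNacl  = 0
	goosLinux = 1
)

// Multiplying by the 0/1 constants picks a value per GOOS at compile time,
// the same trick used for PhysPageSize on amd64p32/nacl.
const physPageSize = 65536*goosNacl + 4096*(1-goosNacl)

func main() {
	fmt.Println("physical page size for this build:", physPageSize)
}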
diff --git a/src/runtime/internal/sys/zgoarch_386.go b/src/runtime/internal/sys/zgoarch_386.go
new file mode 100644
index 0000000000..3ad244509d
--- /dev/null
+++ b/src/runtime/internal/sys/zgoarch_386.go
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `386`
+
+const Goarch386 = 1
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 0
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_amd64.go b/src/runtime/internal/sys/zgoarch_amd64.go
new file mode 100644
index 0000000000..7c858e3f5d
--- /dev/null
+++ b/src/runtime/internal/sys/zgoarch_amd64.go
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `amd64`
+
+const Goarch386 = 0
+const GoarchAmd64 = 1
+const GoarchAmd64p32 = 0
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_amd64p32.go b/src/runtime/internal/sys/zgoarch_amd64p32.go
new file mode 100644
index 0000000000..772031c090
--- /dev/null
+++ b/src/runtime/internal/sys/zgoarch_amd64p32.go
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `amd64p32`
+
+const Goarch386 = 0
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 1
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_arm.go b/src/runtime/internal/sys/zgoarch_arm.go
new file mode 100644
index 0000000000..276e8a869b
--- /dev/null
+++ b/src/runtime/internal/sys/zgoarch_arm.go
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `arm`
+
+const Goarch386 = 0
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 0
+const GoarchArm = 1
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_arm64.go b/src/runtime/internal/sys/zgoarch_arm64.go
new file mode 100644
index 0000000000..d124ec0343
--- /dev/null
+++ b/src/runtime/internal/sys/zgoarch_arm64.go
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `arm64`
+
+const Goarch386 = 0
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 0
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 1
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_ppc64.go b/src/runtime/internal/sys/zgoarch_ppc64.go
new file mode 100644
index 0000000000..06f78b2023
--- /dev/null
+++ b/src/runtime/internal/sys/zgoarch_ppc64.go
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `ppc64`
+
+const Goarch386 = 0
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 0
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 1
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_ppc64le.go b/src/runtime/internal/sys/zgoarch_ppc64le.go
new file mode 100644
index 0000000000..50b56dbe3f
--- /dev/null
+++ b/src/runtime/internal/sys/zgoarch_ppc64le.go
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `ppc64le`
+
+const Goarch386 = 0
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 0
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 1
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoos_android.go b/src/runtime/internal/sys/zgoos_android.go
new file mode 100644
index 0000000000..03d91760ed
--- /dev/null
+++ b/src/runtime/internal/sys/zgoos_android.go
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `android`
+
+const GoosAndroid = 1
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_darwin.go b/src/runtime/internal/sys/zgoos_darwin.go
new file mode 100644
index 0000000000..eb2efeb7af
--- /dev/null
+++ b/src/runtime/internal/sys/zgoos_darwin.go
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `darwin`
+
+const GoosAndroid = 0
+const GoosDarwin = 1
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_dragonfly.go b/src/runtime/internal/sys/zgoos_dragonfly.go
new file mode 100644
index 0000000000..403cf65311
--- /dev/null
+++ b/src/runtime/internal/sys/zgoos_dragonfly.go
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `dragonfly`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 1
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_freebsd.go b/src/runtime/internal/sys/zgoos_freebsd.go
new file mode 100644
index 0000000000..632d5db9db
--- /dev/null
+++ b/src/runtime/internal/sys/zgoos_freebsd.go
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `freebsd`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 1
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_linux.go b/src/runtime/internal/sys/zgoos_linux.go
new file mode 100644
index 0000000000..2d43869a84
--- /dev/null
+++ b/src/runtime/internal/sys/zgoos_linux.go
@@ -0,0 +1,19 @@
+// generated by gengoos.go using 'go generate'
+
+// +build !android
+
+package sys
+
+const TheGoos = `linux`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 1
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_nacl.go b/src/runtime/internal/sys/zgoos_nacl.go
new file mode 100644
index 0000000000..a56b6ef3c9
--- /dev/null
+++ b/src/runtime/internal/sys/zgoos_nacl.go
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `nacl`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 1
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_netbsd.go b/src/runtime/internal/sys/zgoos_netbsd.go
new file mode 100644
index 0000000000..46fd0a7cd5
--- /dev/null
+++ b/src/runtime/internal/sys/zgoos_netbsd.go
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `netbsd`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 1
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_openbsd.go b/src/runtime/internal/sys/zgoos_openbsd.go
new file mode 100644
index 0000000000..7ee650afbb
--- /dev/null
+++ b/src/runtime/internal/sys/zgoos_openbsd.go
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `openbsd`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 1
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_plan9.go b/src/runtime/internal/sys/zgoos_plan9.go
new file mode 100644
index 0000000000..162e7f6260
--- /dev/null
+++ b/src/runtime/internal/sys/zgoos_plan9.go
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `plan9`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 1
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_solaris.go b/src/runtime/internal/sys/zgoos_solaris.go
new file mode 100644
index 0000000000..b2a8f98504
--- /dev/null
+++ b/src/runtime/internal/sys/zgoos_solaris.go
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `solaris`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 1
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_windows.go b/src/runtime/internal/sys/zgoos_windows.go
new file mode 100644
index 0000000000..817ec79e4c
--- /dev/null
+++ b/src/runtime/internal/sys/zgoos_windows.go
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `windows`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 1
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index efaa46f352..6430511d7d 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -79,7 +79,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
const (
debugMalloc = false
@@ -143,7 +146,7 @@ const (
// windows/32 | 4KB | 3
// windows/64 | 8KB | 2
// plan9 | 4KB | 3
- _NumStackOrders = 4 - ptrSize/4*goos_windows - 1*goos_plan9
+ _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
// Number of bits in page to span calculations (4k pages).
// On Windows 64-bit we limit the arena to 32GB or 35 bits.
@@ -155,7 +158,7 @@ const (
// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
// but as most devices have less than 4GB of physical memory anyway, we
// try to be conservative here, and only ask for a 2GB heap.
- _MHeapMap_TotalBits = (_64bit*goos_windows)*35 + (_64bit*(1-goos_windows)*(1-goos_darwin*goarch_arm64))*39 + goos_darwin*goarch_arm64*31 + (1-_64bit)*32
+ _MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32
_MHeapMap_Bits = _MHeapMap_TotalBits - _PageShift
_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
@@ -228,7 +231,7 @@ func mallocinit() {
// Set up the allocation arena, a contiguous area of memory where
// allocated data will be found. The arena begins with a bitmap large
// enough to hold 4 bits per allocated word.
- if ptrSize == 8 && (limit == 0 || limit > 1<<30) {
+ if sys.PtrSize == 8 && (limit == 0 || limit > 1<<30) {
// On a 64-bit machine, allocate from a single contiguous reservation.
// 512 GB (MaxMem) should be big enough for now.
//
@@ -259,8 +262,8 @@ func mallocinit() {
// translation buffers, the user address space is limited to 39 bits
// On darwin/arm64, the address space is even smaller.
arenaSize := round(_MaxMem, _PageSize)
- bitmapSize = arenaSize / (ptrSize * 8 / 4)
- spansSize = arenaSize / _PageSize * ptrSize
+ bitmapSize = arenaSize / (sys.PtrSize * 8 / 4)
+ spansSize = arenaSize / _PageSize * sys.PtrSize
spansSize = round(spansSize, _PageSize)
for i := 0; i <= 0x7f; i++ {
switch {
@@ -308,12 +311,12 @@ func mallocinit() {
}
for _, arenaSize := range arenaSizes {
- bitmapSize = _MaxArena32 / (ptrSize * 8 / 4)
- spansSize = _MaxArena32 / _PageSize * ptrSize
+ bitmapSize = _MaxArena32 / (sys.PtrSize * 8 / 4)
+ spansSize = _MaxArena32 / _PageSize * sys.PtrSize
if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
arenaSize = bitmapSize * 8
- spansSize = arenaSize / _PageSize * ptrSize
+ spansSize = arenaSize / _PageSize * sys.PtrSize
}
spansSize = round(spansSize, _PageSize)
@@ -368,7 +371,7 @@ func mallocinit() {
// needed. This doesn't work well with the "let the kernel pick an address"
// mode, so don't do that. Pick a high address instead.
func sysReserveHigh(n uintptr, reserved *bool) unsafe.Pointer {
- if ptrSize == 4 {
+ if sys.PtrSize == 4 {
return sysReserve(nil, n, reserved)
}
@@ -642,7 +645,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
x = unsafe.Pointer(v)
if flags&flagNoZero == 0 {
v.ptr().next = 0
- if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 {
+ if size > 2*sys.PtrSize && ((*[2]uintptr)(x))[1] != 0 {
memclr(unsafe.Pointer(v), size)
}
}
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index f6e6c30648..f9553b9e14 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -13,7 +13,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
// markwb is the mark-phase write barrier, the only barrier we have.
// The rest of this file exists only to make calls to this function.
@@ -128,7 +131,7 @@ func writebarrierptr(dst *uintptr, src uintptr) {
if !writeBarrierEnabled {
return
}
- if src != 0 && (src < _PhysPageSize || src == poisonStack) {
+ if src != 0 && (src < sys.PhysPageSize || src == poisonStack) {
systemstack(func() {
print("runtime: writebarrierptr *", dst, " = ", hex(src), "\n")
throw("bad pointer in write barrier")
@@ -144,7 +147,7 @@ func writebarrierptr_nostore(dst *uintptr, src uintptr) {
if !writeBarrierEnabled {
return
}
- if src != 0 && (src < _PhysPageSize || src == poisonStack) {
+ if src != 0 && (src < sys.PhysPageSize || src == poisonStack) {
systemstack(func() { throw("bad pointer in write barrier") })
}
writebarrierptr_nostore1(dst, src)
@@ -195,15 +198,15 @@ func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
memmove(dst, src, size)
- if !writeBarrierEnabled || typ.kind&kindNoPointers != 0 || size < ptrSize || !inheap(uintptr(dst)) {
+ if !writeBarrierEnabled || typ.kind&kindNoPointers != 0 || size < sys.PtrSize || !inheap(uintptr(dst)) {
return
}
- if frag := -off & (ptrSize - 1); frag != 0 {
+ if frag := -off & (sys.PtrSize - 1); frag != 0 {
dst = add(dst, frag)
size -= frag
}
- heapBitsBulkBarrier(uintptr(dst), size&^(ptrSize-1))
+ heapBitsBulkBarrier(uintptr(dst), size&^(sys.PtrSize-1))
}
// callwritebarrier is invoked at the end of reflectcall, to execute
@@ -215,7 +218,7 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size
// not to be preempted before the write barriers have been run.
//go:nosplit
func callwritebarrier(typ *_type, frame unsafe.Pointer, framesize, retoffset uintptr) {
- if !writeBarrierEnabled || typ == nil || typ.kind&kindNoPointers != 0 || framesize-retoffset < ptrSize || !inheap(uintptr(frame)) {
+ if !writeBarrierEnabled || typ == nil || typ.kind&kindNoPointers != 0 || framesize-retoffset < sys.PtrSize || !inheap(uintptr(frame)) {
return
}
heapBitsBulkBarrier(uintptr(add(frame, retoffset)), framesize-retoffset)
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index ba123eafea..335d1d8251 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -68,6 +68,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -75,8 +76,8 @@ const (
bitPointer = 1 << 0
bitMarked = 1 << 4
- heapBitsShift = 1 // shift offset between successive bitPointer or bitMarked entries
- heapBitmapScale = ptrSize * (8 / 2) // number of data bytes described by one heap bitmap byte
+ heapBitsShift = 1 // shift offset between successive bitPointer or bitMarked entries
+ heapBitmapScale = sys.PtrSize * (8 / 2) // number of data bytes described by one heap bitmap byte
// all mark/pointer bits in a byte
bitMarkedAll = bitMarked | bitMarked<<heapBitsShift | bitMarked<<(2*heapBitsShift) | bitMarked<<(3*heapBitsShift)
@@ -139,7 +140,7 @@ func (h *mheap) mapBits(arena_used uintptr) {
n := (arena_used - mheap_.arena_start) / heapBitmapScale
n = round(n, bitmapChunk)
- n = round(n, _PhysPageSize)
+ n = round(n, sys.PhysPageSize)
if h.bitmap_mapped >= n {
return
}
@@ -164,7 +165,7 @@ type heapBits struct {
//go:nosplit
func heapBitsForAddr(addr uintptr) heapBits {
// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
- off := (addr - mheap_.arena_start) / ptrSize
+ off := (addr - mheap_.arena_start) / sys.PtrSize
return heapBits{(*uint8)(unsafe.Pointer(mheap_.arena_start - off/4 - 1)), uint32(off & 3)}
}
@@ -328,7 +329,7 @@ func (h heapBits) isPointer() bool {
// far into the bitmap.
// h must describe the initial word of the object.
func (h heapBits) hasPointers(size uintptr) bool {
- if size == ptrSize { // 1-word objects are always pointers
+ if size == sys.PtrSize { // 1-word objects are always pointers
return true
}
// Otherwise, at least a 2-word object, and at least 2-word aligned,
@@ -339,7 +340,7 @@ func (h heapBits) hasPointers(size uintptr) bool {
if b&(bitPointer|bitPointer<<heapBitsShift) != 0 {
return true
}
- if size == 2*ptrSize {
+ if size == 2*sys.PtrSize {
return false
}
// At least a 4-word object. Check scan bit (aka marked bit) in third word.
@@ -354,7 +355,7 @@ func (h heapBits) hasPointers(size uintptr) bool {
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) isCheckmarked(size uintptr) bool {
- if size == ptrSize {
+ if size == sys.PtrSize {
return (*h.bitp>>h.shift)&bitPointer != 0
}
// All multiword objects are 2-word aligned,
@@ -369,7 +370,7 @@ func (h heapBits) isCheckmarked(size uintptr) bool {
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) setCheckmarked(size uintptr) {
- if size == ptrSize {
+ if size == sys.PtrSize {
atomic.Or8(h.bitp, bitPointer<<h.shift)
return
}
@@ -395,7 +396,7 @@ func (h heapBits) setCheckmarked(size uintptr) {
//
//go:nosplit
func heapBitsBulkBarrier(p, size uintptr) {
- if (p|size)&(ptrSize-1) != 0 {
+ if (p|size)&(sys.PtrSize-1) != 0 {
throw("heapBitsBulkBarrier: unaligned arguments")
}
if !writeBarrierEnabled {
@@ -430,7 +431,7 @@ func heapBitsBulkBarrier(p, size uintptr) {
}
h := heapBitsForAddr(p)
- for i := uintptr(0); i < size; i += ptrSize {
+ for i := uintptr(0); i < size; i += sys.PtrSize {
if h.isPointer() {
x := (*uintptr)(unsafe.Pointer(p + i))
writebarrierptr_nostore(x, *x)
@@ -470,8 +471,8 @@ func typeBitsBulkBarrier(typ *_type, p, size uintptr) {
}
ptrmask := typ.gcdata
var bits uint32
- for i := uintptr(0); i < typ.ptrdata; i += ptrSize {
- if i&(ptrSize*8-1) == 0 {
+ for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
+ if i&(sys.PtrSize*8-1) == 0 {
bits = uint32(*ptrmask)
ptrmask = addb(ptrmask, 1)
} else {
@@ -498,7 +499,7 @@ func (h heapBits) initSpan(size, n, total uintptr) {
throw("initSpan: unaligned length")
}
nbyte := total / heapBitmapScale
- if ptrSize == 8 && size == ptrSize {
+ if sys.PtrSize == 8 && size == sys.PtrSize {
end := h.bitp
bitp := subtractb(end, nbyte-1)
for {
@@ -517,7 +518,7 @@ func (h heapBits) initSpan(size, n, total uintptr) {
// It clears the checkmark bits, which are set to 1 in normal operation.
func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
// The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
- if ptrSize == 8 && size == ptrSize {
+ if sys.PtrSize == 8 && size == sys.PtrSize {
// Checkmark bit is type bit, bottom bit of every 2-bit entry.
// Only possible on 64-bit system, since minimum size is 8.
// Must clear type bit (checkmark bit) of every word.
@@ -531,7 +532,7 @@ func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
}
for i := uintptr(0); i < n; i++ {
*h.bitp &^= bitMarked << (heapBitsShift + h.shift)
- h = h.forward(size / ptrSize)
+ h = h.forward(size / sys.PtrSize)
}
}
@@ -541,7 +542,7 @@ func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
// but consulted by typedmemmove.)
func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
// The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
- if ptrSize == 8 && size == ptrSize {
+ if sys.PtrSize == 8 && size == sys.PtrSize {
// Checkmark bit is type bit, bottom bit of every 2-bit entry.
// Only possible on 64-bit system, since minimum size is 8.
// Must clear type bit (checkmark bit) of every word.
@@ -566,7 +567,7 @@ func heapBitsSweepSpan(base, size, n uintptr, f func(uintptr)) {
switch {
default:
throw("heapBitsSweepSpan")
- case ptrSize == 8 && size == ptrSize:
+ case sys.PtrSize == 8 && size == sys.PtrSize:
// Consider mark bits in all four 2-bit entries of each bitmap byte.
bitp := h.bitp
for i := uintptr(0); i < n; i += 4 {
@@ -579,28 +580,28 @@ func heapBitsSweepSpan(base, size, n uintptr, f func(uintptr)) {
if x&bitMarked != 0 {
x &^= bitMarked
} else {
- f(base + i*ptrSize)
+ f(base + i*sys.PtrSize)
}
if x&(bitMarked<<heapBitsShift) != 0 {
x &^= bitMarked << heapBitsShift
} else {
- f(base + (i+1)*ptrSize)
+ f(base + (i+1)*sys.PtrSize)
}
if x&(bitMarked<<(2*heapBitsShift)) != 0 {
x &^= bitMarked << (2 * heapBitsShift)
} else {
- f(base + (i+2)*ptrSize)
+ f(base + (i+2)*sys.PtrSize)
}
if x&(bitMarked<<(3*heapBitsShift)) != 0 {
x &^= bitMarked << (3 * heapBitsShift)
} else {
- f(base + (i+3)*ptrSize)
+ f(base + (i+3)*sys.PtrSize)
}
*bitp = uint8(x)
bitp = subtract1(bitp)
}
- case size%(4*ptrSize) == 0:
+ case size%(4*sys.PtrSize) == 0:
// Mark bit is in first word of each object.
// Each object starts at bit 0 of a heap bitmap byte.
bitp := h.bitp
@@ -617,7 +618,7 @@ func heapBitsSweepSpan(base, size, n uintptr, f func(uintptr)) {
bitp = subtractb(bitp, step)
}
- case size%(4*ptrSize) == 2*ptrSize:
+ case size%(4*sys.PtrSize) == 2*sys.PtrSize:
// Mark bit is in first word of each object,
// but every other object starts halfway through a heap bitmap byte.
// Unroll loop 2x to handle alternating shift count and step size.
@@ -631,7 +632,7 @@ func heapBitsSweepSpan(base, size, n uintptr, f func(uintptr)) {
} else {
x &^= bitMarked | bitPointer | (bitMarked|bitPointer)<<heapBitsShift
f(base + i*size)
- if size > 2*ptrSize {
+ if size > 2*sys.PtrSize {
x = 0
}
}
@@ -646,7 +647,7 @@ func heapBitsSweepSpan(base, size, n uintptr, f func(uintptr)) {
} else {
x &^= (bitMarked|bitPointer)<<(2*heapBitsShift) | (bitMarked|bitPointer)<<(3*heapBitsShift)
f(base + (i+1)*size)
- if size > 2*ptrSize {
+ if size > 2*sys.PtrSize {
*subtract1(bitp) = 0
}
}
@@ -686,7 +687,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// The checks for size == ptrSize and size == 2*ptrSize can therefore
// assume that dataSize == size without checking it explicitly.
- if ptrSize == 8 && size == ptrSize {
+ if sys.PtrSize == 8 && size == sys.PtrSize {
// It's one word and it has pointers, it must be a pointer.
// In general we'd need an atomic update here if the
// concurrent GC were marking objects in this span,
@@ -712,8 +713,8 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// This is called out as a special case primarily for 32-bit systems,
// so that on 32-bit systems the code below can assume all objects
// are 4-word aligned (because they're all 16-byte aligned).
- if size == 2*ptrSize {
- if typ.size == ptrSize {
+ if size == 2*sys.PtrSize {
+ if typ.size == sys.PtrSize {
// We're allocating a block big enough to hold two pointers.
// On 64-bit, that means the actual object must be two pointers,
// or else we'd have used the one-pointer-sized block.
@@ -722,7 +723,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// just the smallest block available. Distinguish by checking dataSize.
// (In general the number of instances of typ being allocated is
// dataSize/typ.size.)
- if ptrSize == 4 && dataSize == ptrSize {
+ if sys.PtrSize == 4 && dataSize == sys.PtrSize {
// 1 pointer.
if gcphase == _GCoff {
*h.bitp |= bitPointer << h.shift
@@ -741,7 +742,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
}
// Otherwise typ.size must be 2*ptrSize, and typ.kind&kindGCProg == 0.
if doubleCheck {
- if typ.size != 2*ptrSize || typ.kind&kindGCProg != 0 {
+ if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
throw("heapBitsSetType")
}
@@ -842,8 +843,8 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// Filling in bits for an array of typ.
// Set up for repetition of ptrmask during main loop.
// Note that ptrmask describes only a prefix of
- const maxBits = ptrSize*8 - 7
- if typ.ptrdata/ptrSize <= maxBits {
+ const maxBits = sys.PtrSize*8 - 7
+ if typ.ptrdata/sys.PtrSize <= maxBits {
// Entire ptrmask fits in uintptr with room for a byte fragment.
// Load into pbits and never read from ptrmask again.
// This is especially important when the ptrmask has
@@ -854,12 +855,12 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// Accumulate ptrmask into b.
// ptrmask is sized to describe only typ.ptrdata, but we record
// it as describing typ.size bytes, since all the high bits are zero.
- nb = typ.ptrdata / ptrSize
+ nb = typ.ptrdata / sys.PtrSize
for i := uintptr(0); i < nb; i += 8 {
b |= uintptr(*p) << i
p = add1(p)
}
- nb = typ.size / ptrSize
+ nb = typ.size / sys.PtrSize
// Replicate ptrmask to fill entire pbits uintptr.
// Doubling and truncating is fewer steps than
@@ -870,7 +871,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
pbits = b
endnb = nb
if nb+nb <= maxBits {
- for endnb <= ptrSize*8 {
+ for endnb <= sys.PtrSize*8 {
pbits |= pbits << endnb
endnb += endnb
}
@@ -887,9 +888,9 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
endp = nil
} else {
// Ptrmask is larger. Read it multiple times.
- n := (typ.ptrdata/ptrSize+7)/8 - 1
+ n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
endp = addb(ptrmask, n)
- endnb = typ.size/ptrSize - n*8
+ endnb = typ.size/sys.PtrSize - n*8
}
}
if p != nil {
@@ -900,12 +901,12 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
if typ.size == dataSize {
// Single entry: can stop once we reach the non-pointer data.
- nw = typ.ptrdata / ptrSize
+ nw = typ.ptrdata / sys.PtrSize
} else {
// Repeated instances of typ in an array.
// Have to process first N-1 entries in full, but can stop
// once we reach the non-pointer data in the final entry.
- nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / ptrSize
+ nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
}
if nw == 0 {
// No pointers! Caller was supposed to check.
@@ -945,7 +946,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
b >>= 4
nb -= 4
- case ptrSize == 8 && h.shift == 2:
+ case sys.PtrSize == 8 && h.shift == 2:
// Ptrmask and heap bitmap are misaligned.
// The bits for the first two words are in a byte shared with another object
// and must be updated atomically.
@@ -1058,7 +1059,7 @@ Phase3:
}
// Change nw from counting possibly-pointer words to total words in allocation.
- nw = size / ptrSize
+ nw = size / sys.PtrSize
// Write whole bitmap bytes.
// The first is hb, the rest are zero.
@@ -1105,11 +1106,11 @@ Phase4:
// Double-check that bits to be written were written correctly.
// Does not check that other bits were not written, unfortunately.
h := heapBitsForAddr(x)
- nptr := typ.ptrdata / ptrSize
- ndata := typ.size / ptrSize
+ nptr := typ.ptrdata / sys.PtrSize
+ ndata := typ.size / sys.PtrSize
count := dataSize / typ.size
- totalptr := ((count-1)*typ.size + typ.ptrdata) / ptrSize
- for i := uintptr(0); i < size/ptrSize; i++ {
+ totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
+ for i := uintptr(0); i < size/sys.PtrSize; i++ {
j := i % ndata
var have, want uint8
have = (*h.bitp >> h.shift) & (bitPointer | bitMarked)
@@ -1137,7 +1138,7 @@ Phase4:
print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
- println("at word", i, "offset", i*ptrSize, "have", have, "want", want)
+ println("at word", i, "offset", i*sys.PtrSize, "have", have, "want", want)
if typ.kind&kindGCProg != 0 {
println("GC program:")
dumpGCProg(addb(typ.gcdata, 4))
@@ -1168,14 +1169,14 @@ var debugPtrmask struct {
// so that the relevant bitmap bytes are not shared with surrounding
// objects and need not be accessed with atomic instructions.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
- if ptrSize == 8 && allocSize%(4*ptrSize) != 0 {
+ if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
// Alignment will be wrong.
throw("heapBitsSetTypeGCProg: small allocation")
}
var totalBits uintptr
if elemSize == dataSize {
totalBits = runGCProg(prog, nil, h.bitp, 2)
- if totalBits*ptrSize != progSize {
+ if totalBits*sys.PtrSize != progSize {
println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
throw("heapBitsSetTypeGCProg: unexpected bit count")
}
@@ -1190,7 +1191,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// repeats that first element to fill the array.
var trailer [40]byte // 3 varints (max 10 each) + some bytes
i := 0
- if n := elemSize/ptrSize - progSize/ptrSize; n > 0 {
+ if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
// literal(0)
trailer[i] = 0x01
i++
@@ -1212,7 +1213,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// repeat(elemSize/ptrSize, count-1)
trailer[i] = 0x80
i++
- n := elemSize / ptrSize
+ n := elemSize / sys.PtrSize
for ; n >= 0x80; n >>= 7 {
trailer[i] = byte(n | 0x80)
i++
@@ -1236,7 +1237,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// last element. This will cause the code below to
// memclr the dead section of the final array element,
// so that scanobject can stop early in the final element.
- totalBits = (elemSize*(count-1) + progSize) / ptrSize
+ totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
}
endProg := unsafe.Pointer(subtractb(h.bitp, (totalBits+3)/4))
endAlloc := unsafe.Pointer(subtractb(h.bitp, allocSize/heapBitmapScale))
@@ -1247,7 +1248,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// size the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/ptrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
- n := (size/ptrSize + 7) / 8
+ n := (size/sys.PtrSize + 7) / 8
x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
x[len(x)-1] = 0xa1 // overflow check sentinel
n = runGCProg(prog, nil, &x[0], 1)
@@ -1385,7 +1386,7 @@ Run:
// the pattern to a bit buffer holding at most 7 bits (a partial byte)
// it will not overflow.
src := dst
- const maxBits = ptrSize*8 - 7
+ const maxBits = sys.PtrSize*8 - 7
if n <= maxBits {
// Start with bits in output buffer.
pattern := bits
@@ -1438,7 +1439,7 @@ Run:
nb := npattern
if nb+nb <= maxBits {
// Double pattern until the whole uintptr is filled.
- for nb <= ptrSize*8 {
+ for nb <= sys.PtrSize*8 {
b |= b << nb
nb += nb
}
@@ -1627,7 +1628,7 @@ func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
func reflect_gcbits(x interface{}) []byte {
ret := getgcmask(x)
typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
- nptr := typ.ptrdata / ptrSize
+ nptr := typ.ptrdata / sys.PtrSize
for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
ret = ret[:len(ret)-1]
}
@@ -1645,10 +1646,10 @@ func getgcmask(ep interface{}) (mask []byte) {
if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
bitmap := datap.gcdatamask.bytedata
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/ptrSize)
- for i := uintptr(0); i < n; i += ptrSize {
- off := (uintptr(p) + i - datap.data) / ptrSize
- mask[i/ptrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ mask = make([]byte, n/sys.PtrSize)
+ for i := uintptr(0); i < n; i += sys.PtrSize {
+ off := (uintptr(p) + i - datap.data) / sys.PtrSize
+ mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
}
return
}
@@ -1657,10 +1658,10 @@ func getgcmask(ep interface{}) (mask []byte) {
if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
bitmap := datap.gcbssmask.bytedata
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/ptrSize)
- for i := uintptr(0); i < n; i += ptrSize {
- off := (uintptr(p) + i - datap.bss) / ptrSize
- mask[i/ptrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ mask = make([]byte, n/sys.PtrSize)
+ for i := uintptr(0); i < n; i += sys.PtrSize {
+ off := (uintptr(p) + i - datap.bss) / sys.PtrSize
+ mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
}
return
}
@@ -1670,14 +1671,14 @@ func getgcmask(ep interface{}) (mask []byte) {
var n uintptr
var base uintptr
if mlookup(uintptr(p), &base, &n, nil) != 0 {
- mask = make([]byte, n/ptrSize)
- for i := uintptr(0); i < n; i += ptrSize {
+ mask = make([]byte, n/sys.PtrSize)
+ for i := uintptr(0); i < n; i += sys.PtrSize {
hbits := heapBitsForAddr(base + i)
if hbits.isPointer() {
- mask[i/ptrSize] = 1
+ mask[i/sys.PtrSize] = 1
}
- if i >= 2*ptrSize && !hbits.isMarked() {
- mask = mask[:i/ptrSize]
+ if i >= 2*sys.PtrSize && !hbits.isMarked() {
+ mask = mask[:i/sys.PtrSize]
break
}
}
@@ -1708,13 +1709,13 @@ func getgcmask(ep interface{}) (mask []byte) {
return
}
bv := stackmapdata(stkmap, pcdata)
- size := uintptr(bv.n) * ptrSize
+ size := uintptr(bv.n) * sys.PtrSize
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/ptrSize)
- for i := uintptr(0); i < n; i += ptrSize {
+ mask = make([]byte, n/sys.PtrSize)
+ for i := uintptr(0); i < n; i += sys.PtrSize {
bitmap := bv.bytedata
- off := (uintptr(p) + i - frame.varp + size) / ptrSize
- mask[i/ptrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
+ mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
}
}
return
diff --git a/src/runtime/mem_bsd.go b/src/runtime/mem_bsd.go
index ecab584d04..1e388ec728 100644
--- a/src/runtime/mem_bsd.go
+++ b/src/runtime/mem_bsd.go
@@ -6,7 +6,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
@@ -43,7 +46,7 @@ func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
// On 64-bit, people with ulimit -v set complain if we reserve too
// much address space. Instead, assume that the reservation is okay
// and check the assumption in SysMap.
- if ptrSize == 8 && uint64(n) > 1<<32 || goos_nacl != 0 {
+ if sys.PtrSize == 8 && uint64(n) > 1<<32 || sys.GoosNacl != 0 {
*reserved = false
return v
}
diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go
index e8c8999847..330504ba9d 100644
--- a/src/runtime/mem_linux.go
+++ b/src/runtime/mem_linux.go
@@ -4,10 +4,13 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
const (
- _PAGE_SIZE = _PhysPageSize
+ _PAGE_SIZE = sys.PhysPageSize
_EACCES = 13
)
@@ -94,8 +97,8 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
// gets most of the benefit of huge pages while keeping the
// number of VMAs under control. With hugePageSize = 2MB, even
// a pessimal heap can reach 128GB before running out of VMAs.
- if hugePageSize != 0 {
- var s uintptr = hugePageSize // division by constant 0 is a compile-time error :(
+ if sys.HugePageSize != 0 {
+ var s uintptr = sys.HugePageSize // division by constant 0 is a compile-time error :(
// If it's a large allocation, we want to leave huge
// pages enabled. Hence, we only adjust the huge page
@@ -114,17 +117,17 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
// Note that madvise will return EINVAL if the flag is
// already set, which is quite likely. We ignore
// errors.
- if head != 0 && head+hugePageSize == tail {
+ if head != 0 && head+sys.HugePageSize == tail {
// head and tail are different but adjacent,
// so do this in one call.
- madvise(unsafe.Pointer(head), 2*hugePageSize, _MADV_NOHUGEPAGE)
+ madvise(unsafe.Pointer(head), 2*sys.HugePageSize, _MADV_NOHUGEPAGE)
} else {
// Advise the huge pages containing v and v+n-1.
if head != 0 {
- madvise(unsafe.Pointer(head), hugePageSize, _MADV_NOHUGEPAGE)
+ madvise(unsafe.Pointer(head), sys.HugePageSize, _MADV_NOHUGEPAGE)
}
if tail != 0 && tail != head {
- madvise(unsafe.Pointer(tail), hugePageSize, _MADV_NOHUGEPAGE)
+ madvise(unsafe.Pointer(tail), sys.HugePageSize, _MADV_NOHUGEPAGE)
}
}
}
@@ -133,7 +136,7 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
}
func sysUsed(v unsafe.Pointer, n uintptr) {
- if hugePageSize != 0 {
+ if sys.HugePageSize != 0 {
// Partially undo the NOHUGEPAGE marks from sysUnused
// for whole huge pages between v and v+n. This may
// leave huge pages off at the end points v and v+n
@@ -142,7 +145,7 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
// the end points as well, but it's probably not worth
// the cost because when neighboring allocations are
// freed sysUnused will just set NOHUGEPAGE again.
- var s uintptr = hugePageSize
+ var s uintptr = sys.HugePageSize
// Round v up to a huge page boundary.
beg := (uintptr(v) + (s - 1)) &^ (s - 1)
@@ -172,7 +175,7 @@ func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
// much address space. Instead, assume that the reservation is okay
// if we can reserve at least 64K and check the assumption in SysMap.
// Only user-mode Linux (UML) rejects these requests.
- if ptrSize == 8 && uint64(n) > 1<<32 {
+ if sys.PtrSize == 8 && uint64(n) > 1<<32 {
p := mmap_fixed(v, 64<<10, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if p != v {
if uintptr(p) >= 4096 {
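The huge-page handling in sysUnused and sysUsed above repeatedly rounds addresses to HugePageSize boundaries with mask arithmetic ((p + s - 1) &^ (s - 1) and p &^ (s - 1)). A brief sketch of that idiom in isolation (function names here are made up for illustration; the alignment must be a power of two):

package main

import "fmt"

// roundUp and roundDown isolate the alignment arithmetic used around
// HugePageSize: since align is a power of two, &^ (align - 1) clears the
// low bits, and adding align-1 first rounds up instead of down.
func roundUp(p, align uintptr) uintptr   { return (p + align - 1) &^ (align - 1) }
func roundDown(p, align uintptr) uintptr { return p &^ (align - 1) }

func main() {
	const hugePage uintptr = 1 << 21 // 2MB, matching HugePageSize on amd64
	addr := uintptr(0x300123)
	fmt.Printf("up:   %#x\n", roundUp(addr, hugePage))
	fmt.Printf("down: %#x\n", roundDown(addr, hugePage))
}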
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index 8f0f31e7e8..512edeffe8 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -8,6 +8,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -16,14 +17,14 @@ type finblock struct {
next *finblock
cnt int32
_ int32
- fin [(_FinBlockSize - 2*ptrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
+ fin [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
}
var finlock mutex // protects the following variables
var fing *g // goroutine that runs finalizers
var finq *finblock // list of finalizers that are to be executed
var finc *finblock // cache of free blocks
-var finptrmask [_FinBlockSize / ptrSize / 8]byte
+var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
var fingwait bool
var fingwake bool
var allfin *finblock // list of all blocks
@@ -76,12 +77,12 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
if finptrmask[0] == 0 {
// Build pointer mask for Finalizer array in block.
// Check assumptions made in finalizer1 array above.
- if (unsafe.Sizeof(finalizer{}) != 5*ptrSize ||
+ if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
unsafe.Offsetof(finalizer{}.fn) != 0 ||
- unsafe.Offsetof(finalizer{}.arg) != ptrSize ||
- unsafe.Offsetof(finalizer{}.nret) != 2*ptrSize ||
- unsafe.Offsetof(finalizer{}.fint) != 3*ptrSize ||
- unsafe.Offsetof(finalizer{}.ot) != 4*ptrSize) {
+ unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
+ unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
+ unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
+ unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
throw("finalizer out of sync")
}
for i := range finptrmask {
@@ -361,7 +362,7 @@ okarg:
for _, t := range ft.out {
nret = round(nret, uintptr(t.align)) + uintptr(t.size)
}
- nret = round(nret, ptrSize)
+ nret = round(nret, sys.PtrSize)
// make sure we have a finalizer goroutine
createfing()
@@ -379,7 +380,7 @@ okarg:
func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
c := gomcache()
c.local_nlookup++
- if ptrSize == 4 && c.local_nlookup >= 1<<30 {
+ if sys.PtrSize == 4 && c.local_nlookup >= 1<<30 {
// purge cache stats to prevent overflow
lock(&mheap_.lock)
purgecachedstats(c)
@@ -394,7 +395,7 @@ func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
}
p := uintptr(v) >> pageShift
q := p - arena_start>>pageShift
- s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*ptrSize))
+ s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*sys.PtrSize))
if s == nil {
return
}
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index b49452a3ea..e60355083d 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -122,6 +122,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -370,7 +371,7 @@ type gcControllerState struct {
// at the end of each cycle.
triggerRatio float64
- _ [_CacheLineSize]byte
+ _ [sys.CacheLineSize]byte
// fractionalMarkWorkersNeeded is the number of fractional
// mark workers that need to be started. This is either 0 or
@@ -378,7 +379,7 @@ type gcControllerState struct {
// scheduling point (hence it gets its own cache line).
fractionalMarkWorkersNeeded int64
- _ [_CacheLineSize]byte
+ _ [sys.CacheLineSize]byte
}
// startCycle resets the GC controller's state and computes estimates
@@ -730,9 +731,9 @@ const gcAssistTimeSlack = 5000
const gcOverAssistBytes = 1 << 20
var work struct {
- full uint64 // lock-free list of full blocks workbuf
- empty uint64 // lock-free list of empty blocks workbuf
- pad0 [_CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
+ full uint64 // lock-free list of full blocks workbuf
+ empty uint64 // lock-free list of empty blocks workbuf
+ pad0 [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
markrootNext uint32 // next markroot job
markrootJobs uint32 // number of markroot jobs
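The pad0 array and the _ [sys.CacheLineSize]byte fields above are false-sharing guards: fields updated by different Ps are pushed onto separate cache lines by inserting a CacheLineSize-wide gap. A minimal sketch of the layout trick (the 64-byte figure is an assumption; the runtime takes it from sys.CacheLineSize per architecture):

package main

import (
	"fmt"
	"sync/atomic"
)

const cacheLineSize = 64 // assumed; the runtime uses sys.CacheLineSize

// sharded keeps its two hot counters on different cache lines so that a CPU
// updating hits does not invalidate the cache line holding misses.
type sharded struct {
	hits   uint64
	_      [cacheLineSize - 8]byte // pad out the rest of the first cache line
	misses uint64
}

func main() {
	var s sharded
	atomic.AddUint64(&s.hits, 1)
	atomic.AddUint64(&s.misses, 1)
	fmt.Println(s.hits, s.misses)
}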
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index c038dea04a..455ee34ec2 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -8,6 +8,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -201,7 +202,7 @@ func markroot(i uint32) {
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
- if rootBlockBytes%(8*ptrSize) != 0 {
+ if rootBlockBytes%(8*sys.PtrSize) != 0 {
// This is necessary to pick byte offsets in ptrmask0.
throw("rootBlockBytes must be a multiple of 8*ptrSize")
}
@@ -210,7 +211,7 @@ func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
if b >= b0+n0 {
return
}
- ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*ptrSize))))
+ ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
n := uintptr(rootBlockBytes)
if b+n > b0+n0 {
n = b0 + n0 - b
@@ -300,7 +301,7 @@ func markrootSpans(gcw *gcWork, shard int) {
scanobject(p, gcw)
// The special itself is a root.
- scanblock(uintptr(unsafe.Pointer(&spf.fn)), ptrSize, &oneptrmask[0], gcw)
+ scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw)
}
unlock(&s.speciallock)
@@ -704,11 +705,11 @@ func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {
// Scan local variables if stack frame has been allocated.
size := frame.varp - frame.sp
var minsize uintptr
- switch thechar {
+ switch sys.TheChar {
case '7':
- minsize = spAlign
+ minsize = sys.SpAlign
default:
- minsize = minFrameSize
+ minsize = sys.MinFrameSize
}
if size > minsize {
stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
@@ -724,7 +725,7 @@ func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {
throw("scanframe: bad symbol table")
}
bv := stackmapdata(stkmap, pcdata)
- size = uintptr(bv.n) * ptrSize
+ size = uintptr(bv.n) * sys.PtrSize
scanblock(frame.varp-size, size, bv.bytedata, gcw)
}
@@ -746,7 +747,7 @@ func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {
}
bv = stackmapdata(stkmap, pcdata)
}
- scanblock(frame.argp, uintptr(bv.n)*ptrSize, bv.bytedata, gcw)
+ scanblock(frame.argp, uintptr(bv.n)*sys.PtrSize, bv.bytedata, gcw)
}
}
@@ -912,9 +913,9 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
for i := uintptr(0); i < n; {
// Find bits for the next word.
- bits := uint32(*addb(ptrmask, i/(ptrSize*8)))
+ bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
if bits == 0 {
- i += ptrSize * 8
+ i += sys.PtrSize * 8
continue
}
for j := 0; j < 8 && i < n; j++ {
@@ -928,7 +929,7 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
}
}
bits >>= 1
- i += ptrSize
+ i += sys.PtrSize
}
}
}
@@ -962,7 +963,7 @@ func scanobject(b uintptr, gcw *gcWork) {
}
var i uintptr
- for i = 0; i < n; i += ptrSize {
+ for i = 0; i < n; i += sys.PtrSize {
// Find bits for this word.
if i != 0 {
// Avoid needless hbits.next() on last iteration.
@@ -973,7 +974,7 @@ func scanobject(b uintptr, gcw *gcWork) {
// are pointers, or else they'd be merged with other non-pointer
// data into larger allocations.
bits := hbits.bits()
- if i >= 2*ptrSize && bits&bitMarked == 0 {
+ if i >= 2*sys.PtrSize && bits&bitMarked == 0 {
break // no more pointers in this object
}
if bits&bitPointer == 0 {
@@ -1019,7 +1020,7 @@ func shade(b uintptr) {
//go:nowritebarrier
func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork) {
// obj should be start of allocation, and so must be at least pointer-aligned.
- if obj&(ptrSize-1) != 0 {
+ if obj&(sys.PtrSize-1) != 0 {
throw("greyobject: obj not pointer-aligned")
}
@@ -1087,11 +1088,11 @@ func gcDumpObject(label string, obj, off uintptr) {
}
print(" s.start*_PageSize=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n")
skipped := false
- for i := uintptr(0); i < s.elemsize; i += ptrSize {
+ for i := uintptr(0); i < s.elemsize; i += sys.PtrSize {
// For big objects, just print the beginning (because
// that usually hints at the object's type) and the
// fields around off.
- if !(i < 128*ptrSize || off-16*ptrSize < i && i < off+16*ptrSize) {
+ if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
skipped = true
continue
}
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index e670689f1e..22f51dbc1a 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -8,6 +8,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -280,10 +281,10 @@ func (s *mspan) sweep(preserve bool) bool {
freeToHeap = true
} else {
// Free small object.
- if size > 2*ptrSize {
- *(*uintptr)(unsafe.Pointer(p + ptrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
- } else if size > ptrSize {
- *(*uintptr)(unsafe.Pointer(p + ptrSize)) = 0
+ if size > 2*sys.PtrSize {
+ *(*uintptr)(unsafe.Pointer(p + sys.PtrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
+ } else if size > sys.PtrSize {
+ *(*uintptr)(unsafe.Pointer(p + sys.PtrSize)) = 0
}
if head.ptr() == nil {
head = gclinkptr(p)
diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go
index 3654778c94..0a0285d816 100644
--- a/src/runtime/mgcwork.go
+++ b/src/runtime/mgcwork.go
@@ -6,6 +6,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -265,7 +266,7 @@ type workbufhdr struct {
type workbuf struct {
workbufhdr
// account for the above fields
- obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / ptrSize]uintptr
+ obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / sys.PtrSize]uintptr
}
// workbuf factory routines. These funcs are used to manage the
@@ -343,7 +344,7 @@ func getempty(entry int) *workbuf {
}
}
if b == nil {
- b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), _CacheLineSize, &memstats.gc_sys))
+ b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), sys.CacheLineSize, &memstats.gc_sys))
}
b.logget(entry)
return b
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 2feba436f4..d04297cc80 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -10,6 +10,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -58,7 +59,7 @@ type mheap struct {
// gets its own cache line.
central [_NumSizeClasses]struct {
mcentral mcentral
- pad [_CacheLineSize]byte
+ pad [sys.CacheLineSize]byte
}
spanalloc fixalloc // allocator for span*
@@ -169,13 +170,13 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
h := (*mheap)(vh)
s := (*mspan)(p)
if len(h_allspans) >= cap(h_allspans) {
- n := 64 * 1024 / ptrSize
+ n := 64 * 1024 / sys.PtrSize
if n < cap(h_allspans)*3/2 {
n = cap(h_allspans) * 3 / 2
}
var new []*mspan
sp := (*slice)(unsafe.Pointer(&new))
- sp.array = sysAlloc(uintptr(n)*ptrSize, &memstats.other_sys)
+ sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
if sp.array == nil {
throw("runtime: cannot allocate memory")
}
@@ -186,7 +187,7 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
// Don't free the old array if it's referenced by sweep.
// See the comment in mgc.go.
if h.allspans != mheap_.gcspans {
- sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*ptrSize, &memstats.other_sys)
+ sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*sys.PtrSize, &memstats.other_sys)
}
}
h_allspans = new
@@ -239,7 +240,7 @@ func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
_g_ := getg()
_g_.m.mcache.local_nlookup++
- if ptrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
+ if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
// purge cache stats to prevent overflow
lock(&mheap_.lock)
purgecachedstats(_g_.m.mcache)
@@ -305,8 +306,8 @@ func (h *mheap) init(spans_size uintptr) {
sp := (*slice)(unsafe.Pointer(&h_spans))
sp.array = unsafe.Pointer(h.spans)
- sp.len = int(spans_size / ptrSize)
- sp.cap = int(spans_size / ptrSize)
+ sp.len = int(spans_size / sys.PtrSize)
+ sp.cap = int(spans_size / sys.PtrSize)
}
// mHeap_MapSpans makes sure that the spans are mapped
@@ -321,8 +322,8 @@ func (h *mheap) mapSpans(arena_used uintptr) {
// Map spans array, PageSize at a time.
n := arena_used
n -= h.arena_start
- n = n / _PageSize * ptrSize
- n = round(n, _PhysPageSize)
+ n = n / _PageSize * sys.PtrSize
+ n = round(n, sys.PhysPageSize)
if h.spans_mapped >= n {
return
}
@@ -797,7 +798,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
h.spanalloc.free(unsafe.Pointer(t))
}
}
- if (p+s.npages)*ptrSize < h.spans_mapped {
+ if (p+s.npages)*sys.PtrSize < h.spans_mapped {
t := h_spans[p+s.npages]
if t != nil && t.state == _MSpanFree {
s.npages += t.npages
@@ -829,7 +830,7 @@ func (h *mheap) busyList(npages uintptr) *mSpanList {
}
func scavengelist(list *mSpanList, now, limit uint64) uintptr {
- if _PhysPageSize > _PageSize {
+ if sys.PhysPageSize > _PageSize {
// golang.org/issue/9993
// If the physical page size of the machine is larger than
// our logical heap page size the kernel may round up the
@@ -1098,7 +1099,7 @@ func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *p
scanobject(uintptr(base), gcw)
// Mark the finalizer itself, since the
// special isn't part of the GC'd heap.
- scanblock(uintptr(unsafe.Pointer(&s.fn)), ptrSize, &oneptrmask[0], gcw)
+ scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
if gcBlackenPromptly {
gcw.dispose()
}
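An aside on the recordspan hunk above: the reallocation rule starts at 64 KB worth of pointers and otherwise grows the allspans backing array by 1.5x before copying. A minimal sketch of just that sizing rule (nextCap and the fixed ptrSize are invented for the example):

    package main

    import "fmt"

    const ptrSize = 8 // pointer size on a 64-bit platform (assumed for the example)

    // nextCap mirrors the sizing rule in recordspan: at least 64 KB worth
    // of pointers, otherwise 1.5x the current capacity.
    func nextCap(cur int) int {
        n := 64 * 1024 / ptrSize
        if n < cur*3/2 {
            n = cur * 3 / 2
        }
        return n
    }

    func main() {
        fmt.Println(nextCap(0))     // 8192
        fmt.Println(nextCap(10000)) // 15000
    }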
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index 74226904dc..2db01da375 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -8,6 +8,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -369,7 +370,7 @@ func purgecachedstats(c *mcache) {
// overflow errors.
//go:nosplit
func mSysStatInc(sysStat *uint64, n uintptr) {
- if _BigEndian != 0 {
+ if sys.BigEndian != 0 {
atomic.Xadd64(sysStat, int64(n))
return
}
@@ -383,7 +384,7 @@ func mSysStatInc(sysStat *uint64, n uintptr) {
// mSysStatInc apply.
//go:nosplit
func mSysStatDec(sysStat *uint64, n uintptr) {
- if _BigEndian != 0 {
+ if sys.BigEndian != 0 {
atomic.Xadd64(sysStat, -int64(n))
return
}
diff --git a/src/runtime/mstkbar.go b/src/runtime/mstkbar.go
index 9a27d2adf9..964e09ed0a 100644
--- a/src/runtime/mstkbar.go
+++ b/src/runtime/mstkbar.go
@@ -105,7 +105,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
const debugStackBarrier = false
@@ -170,9 +173,9 @@ func gcInstallStackBarrier(gp *g, frame *stkframe) bool {
if usesLR {
lrUintptr = frame.sp
} else {
- lrUintptr = frame.fp - regSize
+ lrUintptr = frame.fp - sys.RegSize
}
- lrPtr := (*uintreg)(unsafe.Pointer(lrUintptr))
+ lrPtr := (*sys.Uintreg)(unsafe.Pointer(lrUintptr))
if debugStackBarrier {
print("install stack barrier at ", hex(lrUintptr), " over ", hex(*lrPtr), ", goid=", gp.goid, "\n")
if uintptr(*lrPtr) != frame.lr {
@@ -185,7 +188,7 @@ func gcInstallStackBarrier(gp *g, frame *stkframe) bool {
stkbar := &gp.stkbar[len(gp.stkbar)-1]
stkbar.savedLRPtr = lrUintptr
stkbar.savedLRVal = uintptr(*lrPtr)
- *lrPtr = uintreg(stackBarrierPC)
+ *lrPtr = sys.Uintreg(stackBarrierPC)
return true
}
@@ -218,8 +221,8 @@ func gcRemoveStackBarrier(gp *g, stkbar stkbar) {
if debugStackBarrier {
print("remove stack barrier at ", hex(stkbar.savedLRPtr), " with ", hex(stkbar.savedLRVal), ", goid=", gp.goid, "\n")
}
- lrPtr := (*uintreg)(unsafe.Pointer(stkbar.savedLRPtr))
- if val := *lrPtr; val != uintreg(stackBarrierPC) {
+ lrPtr := (*sys.Uintreg)(unsafe.Pointer(stkbar.savedLRPtr))
+ if val := *lrPtr; val != sys.Uintreg(stackBarrierPC) {
printlock()
print("at *", hex(stkbar.savedLRPtr), " expected stack barrier PC ", hex(stackBarrierPC), ", found ", hex(val), ", goid=", gp.goid, "\n")
print("gp.stkbar=")
@@ -227,7 +230,7 @@ func gcRemoveStackBarrier(gp *g, stkbar stkbar) {
print(", gp.stkbarPos=", gp.stkbarPos, ", gp.stack=[", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
throw("stack barrier lost")
}
- *lrPtr = uintreg(stkbar.savedLRVal)
+ *lrPtr = sys.Uintreg(stkbar.savedLRVal)
}
// gcPrintStkbars prints a []stkbar for debugging.
diff --git a/src/runtime/os1_freebsd.go b/src/runtime/os1_freebsd.go
index f3519f3490..faf27f411c 100644
--- a/src/runtime/os1_freebsd.go
+++ b/src/runtime/os1_freebsd.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
// From FreeBSD's <sys/sysctl.h>
const (
@@ -133,7 +136,7 @@ func minit() {
// m.procid is a uint64, but thr_new writes a uint32 on 32-bit systems.
// Fix it up. (Only matters on big-endian, but be clean anyway.)
- if ptrSize == 4 {
+ if sys.PtrSize == 4 {
_g_.m.procid = uint64(*(*uint32)(unsafe.Pointer(&_g_.m.procid)))
}
diff --git a/src/runtime/os1_linux.go b/src/runtime/os1_linux.go
index 1d21d3b5aa..d59ca3915e 100644
--- a/src/runtime/os1_linux.go
+++ b/src/runtime/os1_linux.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
// Linux futex.
//
@@ -44,7 +47,7 @@ func futexsleep(addr *uint32, val uint32, ns int64) {
// But on real 64-bit systems, where words are larger but the stack limit
// is not, even timediv is too heavy, and we really need to use just an
// ordinary machine instruction.
- if ptrSize == 8 {
+ if sys.PtrSize == 8 {
ts.set_sec(ns / 1000000000)
ts.set_nsec(int32(ns % 1000000000))
} else {
@@ -81,10 +84,10 @@ func getproccount() int32 {
// buffers, but we don't have a dynamic memory allocator at the
// moment, so that's a bit tricky and seems like overkill.
const maxCPUs = 64 * 1024
- var buf [maxCPUs / (ptrSize * 8)]uintptr
+ var buf [maxCPUs / (sys.PtrSize * 8)]uintptr
r := sched_getaffinity(0, unsafe.Sizeof(buf), &buf[0])
n := int32(0)
- for _, v := range buf[:r/ptrSize] {
+ for _, v := range buf[:r/sys.PtrSize] {
for v != 0 {
n += int32(v & 1)
v >>= 1
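The getproccount hunk sizes its buffer in uintptr words and then counts set bits in the returned CPU affinity mask one bit at a time. A standalone sketch of the same counting loop over a made-up mask (no syscall involved):

    package main

    import "fmt"

    func main() {
        // Pretend this came back from sched_getaffinity: one word per
        // 8*sizeof(uintptr) CPUs, one bit per CPU the process may run on.
        mask := []uintptr{0xb, 0x1} // binary 1011 and 1: four bits set in total

        n := int32(0)
        for _, v := range mask {
            for v != 0 {
                n += int32(v & 1)
                v >>= 1
            }
        }
        fmt.Println("CPUs:", n) // CPUs: 4
    }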
diff --git a/src/runtime/os3_plan9.go b/src/runtime/os3_plan9.go
index 43918bb054..f660cc72a7 100644
--- a/src/runtime/os3_plan9.go
+++ b/src/runtime/os3_plan9.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
@@ -69,11 +72,11 @@ func sighandler(_ureg *ureg, note *byte, gp *g) int {
// to sigpanic instead. (Otherwise the trace will end at
// sigpanic and we won't get to see who faulted).
if pc != 0 {
- if regSize > ptrSize {
- sp -= ptrSize
+ if sys.RegSize > sys.PtrSize {
+ sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = 0
}
- sp -= ptrSize
+ sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = pc
c.setsp(sp)
}
diff --git a/src/runtime/os_linux_386.go b/src/runtime/os_linux_386.go
index e2120da905..3577a2406b 100644
--- a/src/runtime/os_linux_386.go
+++ b/src/runtime/os_linux_386.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
const (
_AT_NULL = 0
@@ -21,7 +24,7 @@ func sysargs(argc int32, argv **byte) {
n++
}
n++
- auxv := (*[1 << 28]uint32)(add(unsafe.Pointer(argv), uintptr(n)*ptrSize))
+ auxv := (*[1 << 28]uint32)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
for i := 0; auxv[i] != _AT_NULL; i += 2 {
switch auxv[i] {
diff --git a/src/runtime/os_linux_arm.go b/src/runtime/os_linux_arm.go
index 3749640ee5..8fdfb585ba 100644
--- a/src/runtime/os_linux_arm.go
+++ b/src/runtime/os_linux_arm.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
const (
_AT_NULL = 0
@@ -40,7 +43,7 @@ func sysargs(argc int32, argv **byte) {
n++
}
n++
- auxv := (*[1 << 28]uint32)(add(unsafe.Pointer(argv), uintptr(n)*ptrSize))
+ auxv := (*[1 << 28]uint32)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
for i := 0; auxv[i] != _AT_NULL; i += 2 {
switch auxv[i] {
diff --git a/src/runtime/parfor.go b/src/runtime/parfor.go
index 2db43bd424..9e11cb3e12 100644
--- a/src/runtime/parfor.go
+++ b/src/runtime/parfor.go
@@ -6,7 +6,10 @@
package runtime
-import "runtime/internal/atomic"
+import (
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+)
// A parfor holds state for the parallel for operation.
type parfor struct {
@@ -38,7 +41,7 @@ type parforthread struct {
nprocyield uint64
nosyield uint64
nsleep uint64
- pad [_CacheLineSize]byte
+ pad [sys.CacheLineSize]byte
}
func parforalloc(nthrmax uint32) *parfor {
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 74158a4423..11ecdae5ae 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -6,6 +6,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -55,7 +56,7 @@ func main() {
// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
// they look nicer in the stack overflow failure message.
- if ptrSize == 8 {
+ if sys.PtrSize == 8 {
maxstacksize = 1000000000
} else {
maxstacksize = 250000000
@@ -306,7 +307,7 @@ func releaseSudog(s *sudog) {
// It assumes that f is a func value. Otherwise the behavior is undefined.
//go:nosplit
func funcPC(f interface{}) uintptr {
- return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize))
+ return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
}
// called from assembly
@@ -393,10 +394,10 @@ func schedinit() {
throw("unknown runnable goroutine during bootstrap")
}
- if buildVersion == "" {
+ if sys.BuildVersion == "" {
// Condition should never trigger. This code just serves
// to ensure runtime·buildVersion is kept in the resulting binary.
- buildVersion = "unknown"
+ sys.BuildVersion = "unknown"
}
}
@@ -999,7 +1000,7 @@ func mstart() {
// Cgo may have left stack size in stack.hi.
size := _g_.stack.hi
if size == 0 {
- size = 8192 * stackGuardMultiplier
+ size = 8192 * sys.StackGuardMultiplier
}
_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
_g_.stack.lo = _g_.stack.hi - size + 1024
@@ -1202,7 +1203,7 @@ func allocm(_p_ *p, fn func()) *m {
if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
mp.g0 = malg(-1)
} else {
- mp.g0 = malg(8192 * stackGuardMultiplier)
+ mp.g0 = malg(8192 * sys.StackGuardMultiplier)
}
mp.g0.m = mp
@@ -1305,9 +1306,9 @@ func newextram() {
// the goroutine stack ends.
mp := allocm(nil, nil)
gp := malg(4096)
- gp.sched.pc = funcPC(goexit) + _PCQuantum
+ gp.sched.pc = funcPC(goexit) + sys.PCQuantum
gp.sched.sp = gp.stack.hi
- gp.sched.sp -= 4 * regSize // extra space in case of reads slightly beyond frame
+ gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
gp.sched.lr = 0
gp.sched.g = guintptr(unsafe.Pointer(gp))
gp.syscallpc = gp.sched.pc
@@ -2535,7 +2536,7 @@ func malg(stacksize int32) *g {
// copied if a stack split occurred.
//go:nosplit
func newproc(siz int32, fn *funcval) {
- argp := add(unsafe.Pointer(&fn), ptrSize)
+ argp := add(unsafe.Pointer(&fn), sys.PtrSize)
pc := getcallerpc(unsafe.Pointer(&siz))
systemstack(func() {
newproc1(fn, (*uint8)(argp), siz, 0, pc)
@@ -2561,7 +2562,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
// Not worth it: this is almost always an error.
// 4*sizeof(uintreg): extra space added below
// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
- if siz >= _StackMin-4*regSize-regSize {
+ if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
throw("newproc: function arguments too large for new goroutine")
}
@@ -2580,21 +2581,21 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
throw("newproc1: new g is not Gdead")
}
- totalSize := 4*regSize + uintptr(siz) + minFrameSize // extra space in case of reads slightly beyond frame
- totalSize += -totalSize & (spAlign - 1) // align to spAlign
+ totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
+ totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign
sp := newg.stack.hi - totalSize
spArg := sp
if usesLR {
// caller's LR
*(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil
- spArg += minFrameSize
+ spArg += sys.MinFrameSize
}
memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))
memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
newg.sched.sp = sp
newg.stktopsp = sp
- newg.sched.pc = funcPC(goexit) + _PCQuantum // +PCQuantum so that previous instruction is in same function
+ newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
newg.sched.g = guintptr(unsafe.Pointer(newg))
gostartcallfn(&newg.sched, fn)
newg.gopc = callerpc
@@ -2928,13 +2929,13 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
n = 2
// "ExternalCode" is better than "etext".
if pc > firstmoduledata.etext {
- pc = funcPC(_ExternalCode) + _PCQuantum
+ pc = funcPC(_ExternalCode) + sys.PCQuantum
}
stk[0] = pc
if mp.preemptoff != "" || mp.helpgc != 0 {
- stk[1] = funcPC(_GC) + _PCQuantum
+ stk[1] = funcPC(_GC) + sys.PCQuantum
} else {
- stk[1] = funcPC(_System) + _PCQuantum
+ stk[1] = funcPC(_System) + sys.PCQuantum
}
}
}
@@ -3981,7 +3982,7 @@ func setMaxThreads(in int) (out int) {
}
func haveexperiment(name string) bool {
- x := goexperiment
+ x := sys.Goexperiment
for x != "" {
xname := ""
i := index(x, ",")
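haveexperiment above scans sys.Goexperiment, a comma-separated list baked in at build time, using the runtime's own index helper because the runtime cannot import strings. Outside the runtime the same check is just a split and compare; a small sketch with an invented experiment list:

    package main

    import (
        "fmt"
        "strings"
    )

    // goexperiment stands in for sys.Goexperiment; the value here is invented.
    const goexperiment = "framepointer,fieldtrack"

    func haveExperiment(name string) bool {
        for _, x := range strings.Split(goexperiment, ",") {
            if x == name {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(haveExperiment("framepointer")) // true
        fmt.Println(haveExperiment("ssa"))          // false
    }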
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index 9a468443fd..4c0242350c 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -6,6 +6,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -52,7 +53,7 @@ var (
// nosplit for use in linux/386 startup linux_setup_vdso
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
- return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*ptrSize))
+ return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}
func args(c int32, v **byte) {
@@ -192,10 +193,10 @@ func check() {
if unsafe.Sizeof(j) != 8 {
throw("bad j")
}
- if unsafe.Sizeof(k) != ptrSize {
+ if unsafe.Sizeof(k) != sys.PtrSize {
throw("bad k")
}
- if unsafe.Sizeof(l) != ptrSize {
+ if unsafe.Sizeof(l) != sys.PtrSize {
throw("bad l")
}
if unsafe.Sizeof(x1) != 1 {
@@ -238,7 +239,7 @@ func check() {
}
k = unsafe.Pointer(uintptr(0xfedcb123))
- if ptrSize == 8 {
+ if sys.PtrSize == 8 {
k = unsafe.Pointer(uintptr(unsafe.Pointer(k)) << 10)
}
if casp(&k, nil, nil) {
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index aafb8cf3cd..9ec0d1545e 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -6,6 +6,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -48,14 +49,6 @@ const (
_Pdead
)
-// The next line makes 'go generate' write the zgen_*.go files with
-// per-OS and per-arch information, including constants
-// named goos_$GOOS and goarch_$GOARCH for every
-// known GOOS and GOARCH. The constant is 1 on the
-// current system, 0 otherwise; multiplying by them is
-// useful for defining GOOS- or GOARCH-specific constants.
-//go:generate go run gengoos.go
-
type mutex struct {
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
@@ -151,7 +144,7 @@ type gobuf struct {
pc uintptr
g guintptr
ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
- ret uintreg
+ ret sys.Uintreg
lr uintptr
bp uintptr // for GOEXPERIMENT=framepointer
}
@@ -533,7 +526,7 @@ type forcegcstate struct {
* known to compiler
*/
const (
- _Structrnd = regSize
+ _Structrnd = sys.RegSize
)
// startup_random_data holds random bytes initialized at startup. These come from
@@ -553,7 +546,7 @@ func extendRandom(r []byte, n int) {
w = 16
}
h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
- for i := 0; i < ptrSize && n < len(r); i++ {
+ for i := 0; i < sys.PtrSize && n < len(r); i++ {
r[n] = byte(h)
n++
h >>= 8
diff --git a/src/runtime/select.go b/src/runtime/select.go
index 508a19b630..b6c3fea001 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -6,7 +6,10 @@ package runtime
// This file contains the implementation of Go select statements.
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
const (
debugSelect = false
@@ -51,7 +54,7 @@ func selectsize(size uintptr) uintptr {
(size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
size*unsafe.Sizeof(*hselect{}.lockorder) +
size*unsafe.Sizeof(*hselect{}.pollorder)
- return round(selsize, _Int64Align)
+ return round(selsize, sys.Int64Align)
}
func newselect(sel *hselect, selsize int64, size int32) {
diff --git a/src/runtime/sema.go b/src/runtime/sema.go
index d9bf4c1cfd..b54621bad8 100644
--- a/src/runtime/sema.go
+++ b/src/runtime/sema.go
@@ -21,6 +21,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -38,7 +39,7 @@ const semTabSize = 251
var semtable [semTabSize]struct {
root semaRoot
- pad [_CacheLineSize - unsafe.Sizeof(semaRoot{})]byte
+ pad [sys.CacheLineSize - unsafe.Sizeof(semaRoot{})]byte
}
//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
diff --git a/src/runtime/signal1_unix.go b/src/runtime/signal1_unix.go
index 8cabbc20e2..3a8b5ba460 100644
--- a/src/runtime/signal1_unix.go
+++ b/src/runtime/signal1_unix.go
@@ -6,6 +6,8 @@
package runtime
+import "runtime/internal/sys"
+
const (
_SIG_DFL uintptr = 0
_SIG_IGN uintptr = 1
@@ -185,7 +187,7 @@ func crash() {
// this means the OS X core file will be >128 GB and even on a zippy
// workstation can take OS X well over an hour to write (uninterruptible).
// Save users from making that mistake.
- if ptrSize == 8 {
+ if sys.PtrSize == 8 {
return
}
}
diff --git a/src/runtime/signal_386.go b/src/runtime/signal_386.go
index 04218f97ea..90d69ee389 100644
--- a/src/runtime/signal_386.go
+++ b/src/runtime/signal_386.go
@@ -6,7 +6,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
func dumpregs(c *sigctxt) {
print("eax ", hex(c.eax()), "\n")
@@ -85,11 +88,11 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
// (Otherwise the trace will end at runtime.sigpanic and we
// won't get to see who faulted.)
if pc != 0 {
- if regSize > ptrSize {
- sp -= ptrSize
+ if sys.RegSize > sys.PtrSize {
+ sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = 0
}
- sp -= ptrSize
+ sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = pc
c.set_esp(uint32(sp))
}
diff --git a/src/runtime/signal_amd64x.go b/src/runtime/signal_amd64x.go
index 473f762918..df317e3835 100644
--- a/src/runtime/signal_amd64x.go
+++ b/src/runtime/signal_amd64x.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "runtime/internal/sys"
"unsafe"
)
@@ -119,11 +120,11 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
// (Otherwise the trace will end at runtime.sigpanic and we
// won't get to see who faulted.)
if pc != 0 {
- if regSize > ptrSize {
- sp -= ptrSize
+ if sys.RegSize > sys.PtrSize {
+ sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = 0
}
- sp -= ptrSize
+ sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = pc
c.set_rsp(uint64(sp))
}
diff --git a/src/runtime/signal_arm64.go b/src/runtime/signal_arm64.go
index 18ecdc29ce..96a4cb3dac 100644
--- a/src/runtime/signal_arm64.go
+++ b/src/runtime/signal_arm64.go
@@ -6,7 +6,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
func dumpregs(c *sigctxt) {
print("r0 ", hex(c.r0()), "\n")
@@ -78,7 +81,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
// functions are correctly handled. This smashes
// the stack frame but we're not going back there
// anyway.
- sp := c.sp() - spAlign // needs only sizeof uint64, but must align the stack
+ sp := c.sp() - sys.SpAlign // needs only sizeof uint64, but must align the stack
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.lr()
diff --git a/src/runtime/signal_linux_386.go b/src/runtime/signal_linux_386.go
index 085f66e898..45074f9d90 100644
--- a/src/runtime/signal_linux_386.go
+++ b/src/runtime/signal_linux_386.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
type sigctxt struct {
info *siginfo
@@ -32,5 +35,5 @@ func (c *sigctxt) set_eip(x uint32) { c.regs().eip = x }
func (c *sigctxt) set_esp(x uint32) { c.regs().esp = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint32) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_linux_amd64.go b/src/runtime/signal_linux_amd64.go
index 5e339b8a46..b8b38ccca9 100644
--- a/src/runtime/signal_linux_amd64.go
+++ b/src/runtime/signal_linux_amd64.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
type sigctxt struct {
info *siginfo
@@ -42,5 +45,5 @@ func (c *sigctxt) set_rip(x uint64) { c.regs().rip = x }
func (c *sigctxt) set_rsp(x uint64) { c.regs().rsp = x }
func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_linux_arm.go b/src/runtime/signal_linux_arm.go
index bdb4314fa8..469f47ca7f 100644
--- a/src/runtime/signal_linux_arm.go
+++ b/src/runtime/signal_linux_arm.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
type sigctxt struct {
info *siginfo
@@ -44,5 +47,5 @@ func (c *sigctxt) set_r10(x uint32) { c.regs().r10 = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint32) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_linux_arm64.go b/src/runtime/signal_linux_arm64.go
index 7d8b010425..465fc4ffa3 100644
--- a/src/runtime/signal_linux_arm64.go
+++ b/src/runtime/signal_linux_arm64.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
type sigctxt struct {
info *siginfo
@@ -57,5 +60,5 @@ func (c *sigctxt) set_lr(x uint64) { c.regs().regs[30] = x }
func (c *sigctxt) set_r28(x uint64) { c.regs().regs[28] = x }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_linux_ppc64x.go b/src/runtime/signal_linux_ppc64x.go
index da3afc9e9e..5445201b19 100644
--- a/src/runtime/signal_linux_ppc64x.go
+++ b/src/runtime/signal_linux_ppc64x.go
@@ -7,7 +7,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
type sigctxt struct {
info *siginfo
@@ -67,5 +70,5 @@ func (c *sigctxt) set_link(x uint64) { c.regs().link = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_ppc64x.go b/src/runtime/signal_ppc64x.go
index e1e690efc7..774aa42c77 100644
--- a/src/runtime/signal_ppc64x.go
+++ b/src/runtime/signal_ppc64x.go
@@ -7,7 +7,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
func dumpregs(c *sigctxt) {
print("r0 ", hex(c.r0()), "\t")
@@ -82,7 +85,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
// functions are correctly handled. This smashes
// the stack frame but we're not going back there
// anyway.
- sp := c.sp() - minFrameSize
+ sp := c.sp() - sys.MinFrameSize
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index f217564aa9..db25636885 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -6,6 +6,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -64,7 +65,7 @@ const (
// to each stack below the usual guard area for OS-specific
// purposes like signal handling. Used on Windows, Plan 9,
// and Darwin/ARM because they do not use a separate stack.
- _StackSystem = goos_windows*512*ptrSize + goos_plan9*512 + goos_darwin*goarch_arm*1024
+ _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024
// The minimum size of stack used by Go code
_StackMin = 2048
@@ -89,7 +90,7 @@ const (
// The stack guard is a pointer this many bytes above the
// bottom of the stack.
- _StackGuard = 640*stackGuardMultiplier + _StackSystem
+ _StackGuard = 640*sys.StackGuardMultiplier + _StackSystem
// After a stack split check the SP is allowed to be this
// many bytes below the stack guard. This saves an instruction
@@ -125,7 +126,7 @@ const (
)
const (
- uintptrMask = 1<<(8*ptrSize) - 1
+ uintptrMask = 1<<(8*sys.PtrSize) - 1
poisonStack = uintptrMask & 0x6868686868686868
// Goroutine preemption request.
@@ -536,10 +537,10 @@ func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f
num := uintptr(bv.n)
for i := uintptr(0); i < num; i++ {
if stackDebug >= 4 {
- print(" ", add(scanp, i*ptrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
+ print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
}
if ptrbit(&bv, i) == 1 {
- pp := (*uintptr)(add(scanp, i*ptrSize))
+ pp := (*uintptr)(add(scanp, i*sys.PtrSize))
p := *pp
if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
// Looks like a junk value in a pointer slot.
@@ -587,11 +588,11 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
// Adjust local variables if stack frame has been allocated.
size := frame.varp - frame.sp
var minsize uintptr
- switch thechar {
+ switch sys.TheChar {
case '7':
- minsize = spAlign
+ minsize = sys.SpAlign
default:
- minsize = minFrameSize
+ minsize = sys.MinFrameSize
}
if size > minsize {
var bv bitvector
@@ -607,15 +608,15 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
throw("bad symbol table")
}
bv = stackmapdata(stackmap, pcdata)
- size = uintptr(bv.n) * ptrSize
+ size = uintptr(bv.n) * sys.PtrSize
if stackDebug >= 3 {
- print(" locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
+ print(" locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
}
adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
}
// Adjust saved base pointer if there is one.
- if thechar == '6' && frame.argp-frame.varp == 2*regSize {
+ if sys.TheChar == '6' && frame.argp-frame.varp == 2*sys.RegSize {
if !framepointer_enabled {
print("runtime: found space for saved base pointer, but no framepointer experiment\n")
print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
@@ -841,9 +842,9 @@ func newstack() {
throw("missing stack in newstack")
}
sp := gp.sched.sp
- if thechar == '6' || thechar == '8' {
+ if sys.TheChar == '6' || sys.TheChar == '8' {
// The call to morestack cost a word.
- sp -= ptrSize
+ sp -= sys.PtrSize
}
if stackDebug >= 1 || sp < gp.stack.lo {
print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
@@ -978,7 +979,7 @@ func shrinkstack(gp *g) {
if gp.syscallsp != 0 {
return
}
- if goos_windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
+ if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
return
}
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index 58ea474c70..9ead56e897 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -6,12 +6,6 @@ package runtime
import "unsafe"
-// Declarations for runtime services implemented in C or assembly.
-
-const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
-const regSize = 4 << (^uintreg(0) >> 63) // unsafe.Sizeof(uintreg(0)) but an ideal const
-const spAlign = 1*(1-goarch_arm64) + 16*goarch_arm64 // SP alignment: 1 normally, 16 for ARM64
-
// Should be a built-in for unsafe.Pointer?
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
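The constants deleted from stubs.go here used a shift trick to obtain the pointer width as an untyped ("ideal") constant rather than via unsafe.Sizeof; equivalents now live in runtime/internal/sys. A self-contained sketch of the trick itself:

    package main

    import "fmt"

    // ^uintptr(0) is all ones; shifting it right by 63 leaves 1 on a 64-bit
    // platform and 0 on a 32-bit one, so the constant below is 8 or 4, and,
    // unlike unsafe.Sizeof(uintptr(0)), it is an untyped constant.
    const ptrSize = 4 << (^uintptr(0) >> 63)

    func main() {
        fmt.Println("pointer size in bytes:", ptrSize)
    }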
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index c3235fac03..00b0a850e0 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
// NOTE: Func does not expose the actual unexported fields, because we return *Func
// values to users, and we want to keep them from being able to overwrite the data
@@ -105,7 +108,7 @@ func moduledataverify1(datap *moduledata) {
// and a byte giving the pointer width in bytes.
pcln := *(**[8]byte)(unsafe.Pointer(&datap.pclntable))
pcln32 := *(**[2]uint32)(unsafe.Pointer(&datap.pclntable))
- if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != _PCQuantum || pcln[7] != ptrSize {
+ if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != sys.PCQuantum || pcln[7] != sys.PtrSize {
println("runtime: function symbol table header:", hex(pcln32[0]), hex(pcln[4]), hex(pcln[5]), hex(pcln[6]), hex(pcln[7]))
throw("invalid function symbol table\n")
}
@@ -358,7 +361,7 @@ func funcline(f *_func, targetpc uintptr) (file string, line int32) {
func funcspdelta(f *_func, targetpc uintptr, cache *pcvalueCache) int32 {
x := pcvalue(f, f.pcsp, targetpc, cache, true)
- if x&(ptrSize-1) != 0 {
+ if x&(sys.PtrSize-1) != 0 {
print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
}
return x
@@ -377,13 +380,13 @@ func funcdata(f *_func, i int32) unsafe.Pointer {
return nil
}
p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
- if ptrSize == 8 && uintptr(p)&4 != 0 {
+ if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
if uintptr(unsafe.Pointer(f))&4 != 0 {
println("runtime: misaligned func", f)
}
p = add(p, 4)
}
- return *(*unsafe.Pointer)(add(p, uintptr(i)*ptrSize))
+ return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
}
// step advances to the next pc, value pair in the encoded table.
@@ -399,7 +402,7 @@ func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool)
}
vdelta := int32(uvdelta)
p, pcdelta := readvarint(p)
- *pc += uintptr(pcdelta * _PCQuantum)
+ *pc += uintptr(pcdelta * sys.PCQuantum)
*val += vdelta
return p, true
}
diff --git a/src/runtime/sys_x86.go b/src/runtime/sys_x86.go
index 3f0771fe0c..137e706d24 100644
--- a/src/runtime/sys_x86.go
+++ b/src/runtime/sys_x86.go
@@ -6,17 +6,20 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
// adjust Gobuf as it if executed a call to fn with context ctxt
// and then did an immediate gosave.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
sp := buf.sp
- if regSize > ptrSize {
- sp -= ptrSize
+ if sys.RegSize > sys.PtrSize {
+ sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = 0
}
- sp -= ptrSize
+ sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
buf.sp = sp
buf.pc = uintptr(fn)
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index d85b0a985c..46ddb64374 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -14,6 +14,7 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
@@ -69,7 +70,7 @@ const (
// and ppc64le.
// Tracing won't work reliably for architectures where cputicks is emulated
// by nanotime, so the value doesn't matter for those architectures.
- traceTickDiv = 16 + 48*(goarch_386|goarch_amd64|goarch_amd64p32)
+ traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
// Maximum number of PCs in a single stack trace.
// Since events contain only stack id rather than whole stack trace,
// we can allow quite large values here.
@@ -704,7 +705,7 @@ Search:
// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
- return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*ptrSize))
+ return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
}
// dump writes all previously cached stacks to trace buffers,
@@ -751,12 +752,12 @@ type traceAlloc struct {
// traceAllocBlock is a block in traceAlloc.
type traceAllocBlock struct {
next *traceAllocBlock
- data [64<<10 - ptrSize]byte
+ data [64<<10 - sys.PtrSize]byte
}
// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
- n = round(n, ptrSize)
+ n = round(n, sys.PtrSize)
if a.head == nil || a.off+n > uintptr(len(a.head.data)) {
if n > uintptr(len(a.head.data)) {
throw("trace: alloc too large")
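The alloc hunk above rounds each request up to a multiple of sys.PtrSize with the runtime's round helper. For power-of-two alignments that is a single mask operation; a hedged sketch of such a helper (roundUp is an invented name, not the runtime's function):

    package main

    import "fmt"

    // roundUp returns n rounded up to a multiple of a, where a must be a
    // power of two (as sys.PtrSize always is).
    func roundUp(n, a uintptr) uintptr {
        return (n + a - 1) &^ (a - 1)
    }

    func main() {
        fmt.Println(roundUp(13, 8)) // 16
        fmt.Println(roundUp(16, 8)) // 16
        fmt.Println(roundUp(0, 8))  // 0
    }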
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 8b33e4a29e..56fbbeae11 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
// The code in this file implements stack trace walking for all architectures.
// The most important fact about a given architecture is whether it uses a link register.
@@ -29,7 +32,7 @@ import "unsafe"
// usesLR is defined below in terms of minFrameSize, which is defined in
// arch_$GOARCH.go. ptrSize and regSize are defined in stubs.go.
-const usesLR = minFrameSize > 0
+const usesLR = sys.MinFrameSize > 0
var (
// initialized in tracebackinit
@@ -181,8 +184,8 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
frame.lr = 0
} else {
- frame.pc = uintptr(*(*uintreg)(unsafe.Pointer(frame.sp)))
- frame.sp += regSize
+ frame.pc = uintptr(*(*sys.Uintreg)(unsafe.Pointer(frame.sp)))
+ frame.sp += sys.RegSize
}
}
@@ -222,7 +225,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
frame.fp = sp + uintptr(funcspdelta(f, frame.pc, &cache))
if !usesLR {
// On x86, call instruction pushes return PC before entering new function.
- frame.fp += regSize
+ frame.fp += sys.RegSize
}
}
var flr *_func
@@ -249,8 +252,8 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
}
} else {
if frame.lr == 0 {
- lrPtr = frame.fp - regSize
- frame.lr = uintptr(*(*uintreg)(unsafe.Pointer(lrPtr)))
+ lrPtr = frame.fp - sys.RegSize
+ frame.lr = uintptr(*(*sys.Uintreg)(unsafe.Pointer(lrPtr)))
}
}
if frame.lr == stackBarrierPC {
@@ -280,13 +283,13 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
frame.varp = frame.fp
if !usesLR {
// On x86, call instruction pushes return PC before entering new function.
- frame.varp -= regSize
+ frame.varp -= sys.RegSize
}
// If framepointer_enabled and there's a frame, then
// there's a saved bp here.
if framepointer_enabled && GOARCH == "amd64" && frame.varp > frame.sp {
- frame.varp -= regSize
+ frame.varp -= sys.RegSize
}
// Derive size of arguments.
@@ -296,7 +299,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
// in package runtime and reflect, and for those we use call-specific
// metadata recorded by f's caller.
if callback != nil || printing {
- frame.argp = frame.fp + minFrameSize
+ frame.argp = frame.fp + sys.MinFrameSize
setArgInfo(&frame, f, callback != nil)
}
@@ -349,7 +352,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
}
print(funcname(f), "(")
argp := (*[100]uintptr)(unsafe.Pointer(frame.argp))
- for i := uintptr(0); i < frame.arglen/ptrSize; i++ {
+ for i := uintptr(0); i < frame.arglen/sys.PtrSize; i++ {
if i >= 10 {
print(", ...")
break
@@ -394,10 +397,10 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
// before faking a call to sigpanic.
if usesLR && waspanic {
x := *(*uintptr)(unsafe.Pointer(frame.sp))
- frame.sp += minFrameSize
+ frame.sp += sys.MinFrameSize
if GOARCH == "arm64" {
// arm64 needs 16-byte aligned SP, always
- frame.sp += ptrSize
+ frame.sp += sys.PtrSize
}
f = findfunc(frame.pc)
frame.fn = f
@@ -494,14 +497,14 @@ func setArgInfo(frame *stkframe, f *_func, needArgMap bool) {
// Extract argument bitmaps for reflect stubs from the calls they made to reflect.
switch funcname(f) {
case "reflect.makeFuncStub", "reflect.methodValueCall":
- arg0 := frame.sp + minFrameSize
+ arg0 := frame.sp + sys.MinFrameSize
fn := *(**[2]uintptr)(unsafe.Pointer(arg0))
if fn[0] != f.entry {
print("runtime: confused by ", funcname(f), "\n")
throw("reflect mismatch")
}
bv := (*bitvector)(unsafe.Pointer(fn[1]))
- frame.arglen = uintptr(bv.n * ptrSize)
+ frame.arglen = uintptr(bv.n * sys.PtrSize)
frame.argmap = bv
}
}
@@ -515,7 +518,7 @@ func printcreatedby(gp *g) {
print("created by ", funcname(f), "\n")
tracepc := pc // back up to CALL instruction for funcline.
if pc > f.entry {
- tracepc -= _PCQuantum
+ tracepc -= sys.PCQuantum
}
file, line := funcline(f, tracepc)
print("\t", file, ":", line)
diff --git a/src/runtime/vdso_linux_amd64.go b/src/runtime/vdso_linux_amd64.go
index 244001590a..38914bb2b9 100644
--- a/src/runtime/vdso_linux_amd64.go
+++ b/src/runtime/vdso_linux_amd64.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
// Look up symbols in the Linux vDSO.
@@ -303,7 +306,7 @@ func sysargs(argc int32, argv **byte) {
n++
// now argv+n is auxv
- auxv := (*[1 << 32]elf64Auxv)(add(unsafe.Pointer(argv), uintptr(n)*ptrSize))
+ auxv := (*[1 << 32]elf64Auxv)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
for i := 0; auxv[i].a_type != _AT_NULL; i++ {
av := &auxv[i]
diff --git a/src/runtime/zgoos_android.go b/src/runtime/zgoos_android.go
deleted file mode 100644
index 0590bd9ab7..0000000000
--- a/src/runtime/zgoos_android.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `android`
-
-const goos_android = 1
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_darwin.go b/src/runtime/zgoos_darwin.go
deleted file mode 100644
index c0a7cd6e74..0000000000
--- a/src/runtime/zgoos_darwin.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `darwin`
-
-const goos_android = 0
-const goos_darwin = 1
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_dragonfly.go b/src/runtime/zgoos_dragonfly.go
deleted file mode 100644
index 008d6de811..0000000000
--- a/src/runtime/zgoos_dragonfly.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `dragonfly`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 1
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_freebsd.go b/src/runtime/zgoos_freebsd.go
deleted file mode 100644
index 2478940353..0000000000
--- a/src/runtime/zgoos_freebsd.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `freebsd`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 1
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_linux.go b/src/runtime/zgoos_linux.go
deleted file mode 100644
index c775ab538d..0000000000
--- a/src/runtime/zgoos_linux.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-// +build !android
-
-package runtime
-
-const theGoos = `linux`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 1
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_nacl.go b/src/runtime/zgoos_nacl.go
deleted file mode 100644
index d9d88f4508..0000000000
--- a/src/runtime/zgoos_nacl.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `nacl`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 1
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_netbsd.go b/src/runtime/zgoos_netbsd.go
deleted file mode 100644
index ff2c5cb8f4..0000000000
--- a/src/runtime/zgoos_netbsd.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `netbsd`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 1
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_openbsd.go b/src/runtime/zgoos_openbsd.go
deleted file mode 100644
index b071dc63ab..0000000000
--- a/src/runtime/zgoos_openbsd.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `openbsd`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 1
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_plan9.go b/src/runtime/zgoos_plan9.go
deleted file mode 100644
index 4306b0f1ef..0000000000
--- a/src/runtime/zgoos_plan9.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `plan9`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 1
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_solaris.go b/src/runtime/zgoos_solaris.go
deleted file mode 100644
index 10f9537d05..0000000000
--- a/src/runtime/zgoos_solaris.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `solaris`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 1
-const goos_windows = 0
diff --git a/src/runtime/zgoos_windows.go b/src/runtime/zgoos_windows.go
deleted file mode 100644
index 56f5c58ce6..0000000000
--- a/src/runtime/zgoos_windows.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `windows`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 1