Diffstat (limited to 'src/runtime/export_test.go')
-rw-r--r--  src/runtime/export_test.go  30
1 file changed, 8 insertions, 22 deletions
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index c8d01fbb15..e7279564e3 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -7,6 +7,8 @@
package runtime
import (
+ "internal/goarch"
+ "internal/goos"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -27,8 +29,6 @@ var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr
-var FuncPC = funcPC
-
var Fastlog2 = fastlog2
var Atoi = atoi
@@ -147,40 +147,28 @@ func RunSchedLocalQueueStealTest() {
}
}
-// Temporary to enable register ABI bringup.
-// TODO(register args): convert back to local variables in RunSchedLocalQueueEmptyTest that
-// get passed to the "go" stmts there.
-var RunSchedLocalQueueEmptyState struct {
- done chan bool
- ready *uint32
- p *p
-}
-
func RunSchedLocalQueueEmptyTest(iters int) {
// Test that runq is not spuriously reported as empty.
// Runq emptiness affects scheduling decisions and spurious emptiness
// can lead to underutilization (both runnable Gs and idle Ps coexist
// for arbitrary long time).
done := make(chan bool, 1)
- RunSchedLocalQueueEmptyState.done = done
p := new(p)
- RunSchedLocalQueueEmptyState.p = p
gs := make([]g, 2)
ready := new(uint32)
- RunSchedLocalQueueEmptyState.ready = ready
for i := 0; i < iters; i++ {
*ready = 0
next0 := (i & 1) == 0
next1 := (i & 2) == 0
runqput(p, &gs[0], next0)
go func() {
- for atomic.Xadd(RunSchedLocalQueueEmptyState.ready, 1); atomic.Load(RunSchedLocalQueueEmptyState.ready) != 2; {
+ for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
}
- if runqempty(RunSchedLocalQueueEmptyState.p) {
- //println("next:", next0, next1)
+ if runqempty(p) {
+ println("next:", next0, next1)
throw("queue is empty")
}
- RunSchedLocalQueueEmptyState.done <- true
+ done <- true
}()
for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
}
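
This hunk drops the temporary RunSchedLocalQueueEmptyState globals that were added for the register ABI bringup, so the go statements in RunSchedLocalQueueEmptyTest once again close over the local done, p, and ready variables. Below is a minimal, self-contained sketch of the same spin-until-both-ready handshake; it uses sync/atomic in place of runtime/internal/atomic and a plain slice in place of the per-P run queue, both of which are stand-ins for illustration rather than the runtime's API.

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	done := make(chan bool, 1)
	queue := []int{1} // stand-in for the per-P run queue that runqempty checks
	ready := new(uint32)

	go func() {
		// Announce arrival, then spin until both goroutines have arrived,
		// so the queue is inspected while it is known to hold an element.
		for atomic.AddUint32(ready, 1); atomic.LoadUint32(ready) != 2; {
		}
		if len(queue) == 0 {
			panic("queue is empty")
		}
		done <- true
	}()

	for atomic.AddUint32(ready, 1); atomic.LoadUint32(ready) != 2; {
	}
	<-done
	fmt.Println("queue observed as non-empty")
}
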
@@ -228,8 +216,6 @@ var Write = write
func Envs() []string { return envs }
func SetEnvs(e []string) { envs = e }
-var BigEndian = sys.BigEndian
-
// For benchmarking.
func BenchSetType(n int, x interface{}) {
@@ -259,7 +245,7 @@ func BenchSetType(n int, x interface{}) {
})
}
-const PtrSize = sys.PtrSize
+const PtrSize = goarch.PtrSize
var ForceGCPeriod = &forcegcperiod
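
PtrSize now comes from internal/goarch instead of runtime/internal/sys; in both packages it is the size of a pointer in bytes, 4 on 32-bit targets and 8 on 64-bit targets. A short sketch of how the same value can be derived outside the runtime (the constant expression here is an illustration, not a claim about goarch's exact source):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// 4 on 32-bit targets, 8 on 64-bit targets: ^uintptr(0) >> 63 is 1 only
	// when uintptr is 64 bits wide.
	const ptrSize = 4 << (^uintptr(0) >> 63)
	fmt.Println("pointer size in bytes:", ptrSize, "==", unsafe.Sizeof(uintptr(0)))
}
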
@@ -1066,7 +1052,7 @@ func FreePageAlloc(pp *PageAlloc) {
//
// This should not be higher than 0x100*pallocChunkBytes to support
// mips and mipsle, which only have 31-bit address spaces.
-var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + arenaBaseOffset*sys.GoosAix))
+var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + arenaBaseOffset*goos.IsAix))
// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
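
In the BaseChunkIdx expression, goos.IsAix plays the same role the old sys.GoosAix did: it is an integer constant that is 1 when GOOS is aix and 0 otherwise, so multiplying arenaBaseOffset by it includes the offset only on AIX while keeping the whole initializer a single constant expression. A sketch of that 0/1-multiplier trick with made-up values standing in for the runtime's pallocChunkBytes, arenaBaseOffset, and pageAlloc selectors:

package main

import "fmt"

// Illustrative stand-ins, not the runtime's real constants.
const (
	isAix           = 0                  // would be 1 when building for GOOS=aix
	is64Bit         = 1                  // selects the 64-bit base below
	is32Bit         = 1 - is64Bit
	chunkBytes      = 4 << 20            // example chunk size: 4 MiB
	arenaBaseOffset = 0x0a00000000000000 // example offset, only applied on AIX
)

// The AIX-only offset is included or excluded by multiplying it with the
// 0/1 isAix flag, so no if statement is needed and baseAddr stays constant.
const baseAddr uint64 = (0xc000*is64Bit+0x100*is32Bit)*chunkBytes + arenaBaseOffset*isAix

func main() {
	fmt.Printf("base address: %#x, chunk index: %d\n", baseAddr, baseAddr/chunkBytes)
}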