Diffstat (limited to 'src/runtime/malloc.go')
 src/runtime/malloc.go | 43 +++++++++++++++++--------------------------
 1 file changed, 17 insertions(+), 26 deletions(-)
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index cc22b0f276..f8d5d48a28 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -101,6 +101,8 @@
package runtime
import (
+ "internal/goarch"
+ "internal/goos"
"runtime/internal/atomic"
"runtime/internal/math"
"runtime/internal/sys"
@@ -150,7 +152,7 @@ const (
// windows/32 | 4KB | 3
// windows/64 | 8KB | 2
// plan9 | 4KB | 3
- _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
+ _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
// heapAddrBits is the number of bits in a heap address. On
// amd64, addresses are sign-extended beyond heapAddrBits. On
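Because the goos/goarch selectors are 0/1 constants, the _NumStackOrders expression above is just the table written out as arithmetic. A quick worked check, using a hypothetical numStackOrders helper with literal values substituted for goarch.PtrSize, goos.IsWindows and goos.IsPlan9:

package main

import "fmt"

// numStackOrders is a hypothetical stand-in for the _NumStackOrders
// constant expression, with the 0/1 selectors passed as arguments.
func numStackOrders(ptrSize, isWindows, isPlan9 int) int {
	return 4 - ptrSize/4*isWindows - 1*isPlan9
}

func main() {
	fmt.Println(numStackOrders(8, 0, 0)) // linux/darwin/bsd, 64-bit: 4
	fmt.Println(numStackOrders(4, 1, 0)) // windows/32: 3
	fmt.Println(numStackOrders(8, 1, 0)) // windows/64: 2
	fmt.Println(numStackOrders(4, 0, 1)) // plan9: 3
}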
@@ -207,7 +209,7 @@ const (
// arenaBaseOffset to offset into the top 4 GiB.
//
// WebAssembly currently has a limit of 4GB linear memory.
- heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosIos*sys.GoarchArm64
+ heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 33*goos.IsIos*goarch.IsArm64
// maxAlloc is the maximum size of an allocation. On 64-bit,
// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
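The heapAddrBits expression above evaluates the same way. A sketch with a hypothetical helper that takes the selectors as literal arguments and reproduces the per-platform values the surrounding comments describe:

package main

import "fmt"

// heapAddrBitsFor is a hypothetical stand-in for the heapAddrBits constant
// expression, with the 0/1 selectors passed as arguments.
func heapAddrBitsFor(is64bit, isWasm, isIos, isArm64, isMips, isMipsle int) int {
	return (is64bit*(1-isWasm)*(1-isIos*isArm64))*48 +
		(1-is64bit+isWasm)*(32-(isMips+isMipsle)) +
		33*isIos*isArm64
}

func main() {
	fmt.Println(heapAddrBitsFor(1, 0, 0, 0, 0, 0)) // e.g. linux/amd64: 48
	fmt.Println(heapAddrBitsFor(1, 1, 0, 0, 0, 0)) // js/wasm: 32 (4 GiB linear memory)
	fmt.Println(heapAddrBitsFor(1, 0, 1, 1, 0, 0)) // ios/arm64: 33
	fmt.Println(heapAddrBitsFor(0, 0, 0, 0, 0, 0)) // 32-bit targets: 32
	fmt.Println(heapAddrBitsFor(0, 0, 0, 0, 1, 0)) // linux/mips (32-bit): 31
}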
@@ -248,10 +250,10 @@ const (
// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
// prefer using heapArenaBytes where possible (we need the
// constant to compute some other constants).
- logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm + (2+20)*sys.GoosIos*sys.GoarchArm64
+ logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
// heapArenaBitmapBytes is the size of each heap arena's bitmap.
- heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
+ heapArenaBitmapBytes = heapArenaBytes / (goarch.PtrSize * 8 / 2)
pagesPerArena = heapArenaBytes / pageSize
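For concreteness, the arena constants above work out as follows on a 64-bit non-Windows target. A small worked sketch, assuming linux/amd64, substituting literals for the selectors and taking heapArenaBytes as 1<<logHeapArenaBytes:

package main

import "fmt"

func main() {
	// Selectors for a linux/amd64 target.
	const (
		_64bit    = 1
		ptrSize   = 8
		isWindows = 0
		isWasm    = 0
		isIos     = 0
		isArm64   = 0
	)
	const logHeapArenaBytes = (6+20)*(_64bit*(1-isWindows)*(1-isWasm)*(1-isIos*isArm64)) +
		(2+20)*(_64bit*isWindows) + (2+20)*(1-_64bit) + (2+20)*isWasm + (2+20)*isIos*isArm64
	const heapArenaBytes = 1 << logHeapArenaBytes
	// Two bitmap bits describe each pointer-sized word, hence /(ptrSize*8/2).
	const heapArenaBitmapBytes = heapArenaBytes / (ptrSize * 8 / 2)

	fmt.Println(logHeapArenaBytes)          // 26, i.e. 64 MiB arenas
	fmt.Println(heapArenaBytes >> 20)       // 64 MiB per arena
	fmt.Println(heapArenaBitmapBytes >> 20) // 2 MiB of bitmap per arena
}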
@@ -268,7 +270,7 @@ const (
// We use the L1 map on 64-bit Windows because the arena size
// is small, but the address space is still 48 bits, and
// there's a high cost to having a large L2.
- arenaL1Bits = 6 * (_64bit * sys.GoosWindows)
+ arenaL1Bits = 6 * (_64bit * goos.IsWindows)
// arenaL2Bits is the number of bits of the arena number
// covered by the second level arena index.
@@ -303,7 +305,7 @@ const (
//
// On other platforms, the user address space is contiguous
// and starts at 0, so no offset is necessary.
- arenaBaseOffset = 0xffff800000000000*sys.GoarchAmd64 + 0x0a00000000000000*sys.GoosAix
+ arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
// A typed version of this constant that will make it into DWARF (for viewcore).
arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
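Roughly how this offset is used: the arena map index for a pointer is derived from (p - arenaBaseOffset), which collapses the sign-extended amd64 address space into one contiguous index range, and that index is then split into the L1/L2 levels described above. The sketch below mirrors the runtime's arenaIndex/l1/l2 helpers in spirit only, assuming linux/amd64 (where arenaL1Bits is 0, so the L1 level is a single entry):

package main

import "fmt"

const (
	arenaBaseOffset   = 0xffff800000000000 // amd64, non-AIX (see the expression above)
	logHeapArenaBytes = 26                 // 64 MiB arenas on linux/amd64
	heapArenaBytes    = 1 << logHeapArenaBytes
	heapAddrBits      = 48
	arenaL1Bits       = 0 // only 64-bit Windows uses a non-trivial L1 level
	arenaL2Bits       = heapAddrBits - logHeapArenaBytes - arenaL1Bits
)

// arenaIndexOf subtracts the base offset so that the lowest valid
// (sign-extended) address maps to index 0 and the whole 48-bit space
// becomes one contiguous range of arena numbers.
func arenaIndexOf(p uintptr) uintptr {
	return (p - arenaBaseOffset) / heapArenaBytes
}

func main() {
	p := uintptr(0xc000000000) // a typical Go heap address on linux/amd64
	i := arenaIndexOf(p)
	l1 := i >> arenaL2Bits         // always 0 when arenaL1Bits == 0
	l2 := i & (1<<arenaL2Bits - 1) // index into the single L2 map
	fmt.Println(i, l1, l2)         // i and l2 fit in arenaL2Bits (22) bits
}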
@@ -420,8 +422,6 @@ func mallocinit() {
throw("bad TinySizeClass")
}
- testdefersizes()
-
if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
// heapBits expects modular arithmetic on bitmap
// addresses to work.
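The check above is the standard power-of-two test: subtracting 1 clears the lowest set bit and sets every bit below it, so x&(x-1) is zero exactly when x has a single bit set. A tiny standalone illustration:

package main

import "fmt"

func isPowerOfTwo(x uintptr) bool {
	return x != 0 && x&(x-1) == 0
}

func main() {
	fmt.Println(isPowerOfTwo(2 << 20)) // true: 2 MiB, like heapArenaBitmapBytes on linux/amd64
	fmt.Println(isPowerOfTwo(3 << 20)) // false: not a power of two
}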
@@ -485,7 +485,7 @@ func mallocinit() {
lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
// Create initial arena growth hints.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
// On a 64-bit machine, we pick the following hints
// because:
//
@@ -732,7 +732,7 @@ mapped:
l2 := h.arenas[ri.l1()]
if l2 == nil {
// Allocate an L2 arena map.
- l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
+ l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), goarch.PtrSize, nil))
if l2 == nil {
throw("out of memory allocating heap arena map")
}
@@ -743,9 +743,9 @@ mapped:
throw("arena already initialized")
}
var r *heapArena
- r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
+ r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
if r == nil {
- r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
+ r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
if r == nil {
throw("out of memory allocating heap arena metadata")
}
@@ -753,16 +753,16 @@ mapped:
// Add the arena to the arenas list.
if len(h.allArenas) == cap(h.allArenas) {
- size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
+ size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
if size == 0 {
size = physPageSize
}
- newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
+ newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
if newArray == nil {
throw("out of memory allocating allArenas")
}
oldSlice := h.allArenas
- *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
+ *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
copy(h.allArenas, oldSlice)
// Do not free the old backing array because
// there may be concurrent readers. Since we
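The pattern here is grow-by-doubling with publication of a new slice header while readers may still hold the old one, which is why the old backing array is never freed. A minimal sketch in ordinary Go, not runtime internals: the garbage collector stands in for the never-freed persistentalloc memory, and only the publication step is lock-free.

package main

import (
	"fmt"
	"sync/atomic"
)

// published holds the current slice header; readers Load() it without locks.
var published atomic.Pointer[[]int]

// appendGrow must be called by a single writer (the runtime holds the heap
// lock at this point); readers that loaded the old header keep using it.
func appendGrow(v int) {
	var s []int
	if p := published.Load(); p != nil {
		s = *p
	}
	if len(s) == cap(s) {
		newCap := 2 * cap(s)
		if newCap == 0 {
			newCap = 8
		}
		grown := make([]int, len(s), newCap) // new backing array
		copy(grown, s)                       // like copy(h.allArenas, oldSlice)
		s = grown                            // old array stays reachable to old readers
	}
	s = append(s, v)
	published.Store(&s)
}

func main() {
	for i := 0; i < 20; i++ {
		appendGrow(i)
	}
	fmt.Println(len(*published.Load()), cap(*published.Load())) // 20 32
}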
@@ -1017,7 +1017,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Align tiny pointer for required (conservative) alignment.
if size&7 == 0 {
off = alignUp(off, 8)
- } else if sys.PtrSize == 4 && size == 12 {
+ } else if goarch.PtrSize == 4 && size == 12 {
// Conservatively align 12-byte objects to 8 bytes on 32-bit
// systems so that objects whose first field is a 64-bit
// value is aligned to 8 bytes and does not cause a fault on
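The alignment above uses the usual round-up-to-a-power-of-two helper. A standalone sketch of that helper and of the 12-byte case the comment describes, assuming alignUp behaves like (n+a-1) &^ (a-1) with a a power of two:

package main

import "fmt"

func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1) // round n up to a multiple of a
}

func main() {
	fmt.Println(alignUp(5, 8))  // 8
	fmt.Println(alignUp(16, 8)) // 16 (already aligned)

	// On a 32-bit target, a 12-byte object whose first field is an int64
	// must start at an 8-byte boundary so atomic 64-bit access doesn't
	// fault; that is the goarch.PtrSize == 4 && size == 12 special case.
	off := uintptr(4)
	fmt.Println(alignUp(off, 8)) // 8: the 12-byte object is placed at offset 8
}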
@@ -1088,15 +1088,6 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
var scanSize uintptr
if !noscan {
- // If allocating a defer+arg block, now that we've picked a malloc size
- // large enough to hold everything, cut the "asked for" size down to
- // just the defer header, so that the GC bitmap will record the arg block
- // as containing nothing at all (as if it were unused space at the end of
- // a malloc block caused by size rounding).
- // The defer arg areas are scanned as part of scanstack.
- if typ == deferType {
- dataSize = unsafe.Sizeof(_defer{})
- }
heapBitsSetType(uintptr(x), size, dataSize, typ)
if dataSize > typ.size {
// Array allocation. If there are any
@@ -1419,7 +1410,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
break
}
}
- persistent.off = alignUp(sys.PtrSize, align)
+ persistent.off = alignUp(goarch.PtrSize, align)
}
p := persistent.base.add(persistent.off)
persistent.off += size
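persistentalloc is a never-freed bump allocator: requests are carved off the current chunk by advancing an offset, a fresh chunk is obtained when the current one runs out, and the offset in a fresh chunk starts at alignUp(goarch.PtrSize, align) rather than 0. A minimal sketch of that shape; the chunk size and the use of make instead of mapped memory are placeholder assumptions:

package main

import "fmt"

const chunkSize = 256 << 10 // assumed chunk size for the sketch

type bumpAlloc struct {
	chunk []byte  // current chunk; the runtime uses system-mapped memory instead
	off   uintptr // next free offset within chunk
}

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

// alloc returns a size-byte block aligned to align within the current chunk.
// Nothing is ever freed; an exhausted chunk is simply replaced.
func (b *bumpAlloc) alloc(size, align uintptr) []byte {
	b.off = alignUp(b.off, align)
	if b.chunk == nil || b.off+size > uintptr(len(b.chunk)) {
		b.chunk = make([]byte, chunkSize) // "map" a fresh chunk
		b.off = alignUp(8, align)         // like alignUp(goarch.PtrSize, align)
	}
	p := b.chunk[b.off : b.off+size]
	b.off += size
	return p
}

func main() {
	var b bumpAlloc
	fmt.Println(len(b.alloc(100, 8)), b.off) // 100 108
	fmt.Println(len(b.alloc(64, 16)), b.off) // 64 176
}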