author     Austin Clements <austin@google.com>  2017-12-30 19:35:46 -0500
committer  Austin Clements <austin@google.com>  2018-02-15 21:12:24 +0000
commit     51ae88ee2f9a1063c272a497527751d786291c89 (patch)
tree       94baf271ae262909ee77c0a475fa538b4808e5d2 /src/runtime/mem_linux.go
parent     2b415549b813ba36caafa34fc34d72e47ee8335c (diff)
runtime: remove non-reserved heap logic
Currently large sysReserve calls on some OSes don't actually reserve the memory, but just check that it can be reserved. This was important when we called sysReserve to "reserve" many gigabytes for the heap up front, but now that we map memory in small increments as we need it, this complication is no longer necessary.

This has one curious side benefit: currently, on Linux, allocations that are large enough to be rejected by mmap wind up freezing the application for a long time before it panics. This happens because sysReserve doesn't reserve the memory, so sysMap calls mmap_fixed, which calls mmap, which fails because the mapping is too large. However, mmap_fixed doesn't inspect *why* mmap fails, so it falls back to probing every page in the desired region individually with mincore before performing an (otherwise dangerous) MAP_FIXED mapping, which will also fail. This takes a long time for a large region. Now this logic is gone, so the mmap failure leads to an immediate panic.

Updates #10460.

Change-Id: I8efe88c611871cdb14f99fadd09db83e0161ca2e
Reviewed-on: https://go-review.googlesource.com/85888
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
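To illustrate the probing the message describes, here is a minimal standalone sketch (my own illustration, not runtime code) of the one-page-at-a-time mincore scan that the removed addrspace_free performed. The helper name addrspaceFree and the demo in main are mine; it assumes Linux, where mincore reports ENOMEM for an unmapped page:

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

// addrspaceFree reports whether the range [v, v+n) is entirely unmapped.
// It probes one page at a time because mincore returns ENOMEM if *any*
// page in the queried range is unmapped (golang.org/issue/7476), while
// we want to know that *all* of them are.
func addrspaceFree(v, n, pageSize uintptr) bool {
	var vec [1]byte
	for off := uintptr(0); off < n; off += pageSize {
		// A length of 1 is rounded up to one page by the kernel.
		_, _, errno := syscall.Syscall(syscall.SYS_MINCORE,
			v+off, 1, uintptr(unsafe.Pointer(&vec[0])))
		if errno == syscall.EINVAL {
			// Address not page-aligned; the old runtime code skipped this too.
			continue
		}
		// ENOMEM means unmapped, which is what we want.
		// Anything else means the page is mapped.
		if errno != syscall.ENOMEM {
			return false
		}
	}
	return true
}

func main() {
	pageSize := uintptr(syscall.Getpagesize())

	// Map one page so we have a known address to probe.
	b, err := syscall.Mmap(-1, 0, int(pageSize),
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	addr := uintptr(unsafe.Pointer(&b[0]))

	fmt.Println("while mapped: free =", addrspaceFree(addr, pageSize, pageSize)) // false
	syscall.Munmap(b)
	fmt.Println("after munmap: free =", addrspaceFree(addr, pageSize, pageSize)) // true
}

Scanning a multi-gigabyte region this way costs one syscall per page, which is exactly the pre-panic freeze the commit eliminates.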
Diffstat (limited to 'src/runtime/mem_linux.go')
-rw-r--r--  src/runtime/mem_linux.go | 75
1 file changed, 2 insertions(+), 73 deletions(-)
diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go
index 16f44439f1..7aa48170a1 100644
--- a/src/runtime/mem_linux.go
+++ b/src/runtime/mem_linux.go
@@ -14,46 +14,6 @@ const (
_EINVAL = 22
)
-// NOTE: vec must be just 1 byte long here.
-// Mincore returns ENOMEM if any of the pages are unmapped,
-// but we want to know that all of the pages are unmapped.
-// To make these the same, we can only ask about one page
-// at a time. See golang.org/issue/7476.
-var addrspace_vec [1]byte
-
-func addrspace_free(v unsafe.Pointer, n uintptr) bool {
- for off := uintptr(0); off < n; off += physPageSize {
- // Use a length of 1 byte, which the kernel will round
- // up to one physical page regardless of the true
- // physical page size.
- errval := mincore(unsafe.Pointer(uintptr(v)+off), 1, &addrspace_vec[0])
- if errval == -_EINVAL {
- // Address is not a multiple of the physical
- // page size. Shouldn't happen, but just ignore it.
- continue
- }
- // ENOMEM means unmapped, which is what we want.
- // Anything else we assume means the pages are mapped.
- if errval != -_ENOMEM {
- return false
- }
- }
- return true
-}
-
-func mmap_fixed(v unsafe.Pointer, n uintptr, prot, flags, fd int32, offset uint32) (unsafe.Pointer, int) {
- p, err := mmap(v, n, prot, flags, fd, offset)
- // On some systems, mmap ignores v without
- // MAP_FIXED, so retry if the address space is free.
- if p != v && addrspace_free(v, n) {
- if err == 0 {
- munmap(p, n)
- }
- p, err = mmap(v, n, prot, flags|_MAP_FIXED, fd, offset)
- }
- return p, err
-}
-
// Don't split the stack as this method may be invoked without a valid G, which
// prevents us from allocating more stack.
//go:nosplit
@@ -180,48 +140,17 @@ func sysFault(v unsafe.Pointer, n uintptr) {
mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
}
-func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
- // On 64-bit, people with ulimit -v set complain if we reserve too
- // much address space. Instead, assume that the reservation is okay
- // if we can reserve at least 64K and check the assumption in SysMap.
- // Only user-mode Linux (UML) rejects these requests.
- if sys.PtrSize == 8 && uint64(n) > 1<<32 {
- p, err := mmap_fixed(v, 64<<10, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
- if p != v || err != 0 {
- if err == 0 {
- munmap(p, 64<<10)
- }
- return nil
- }
- munmap(p, 64<<10)
- *reserved = false
- return v
- }
-
+func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if err != 0 {
return nil
}
- *reserved = true
return p
}
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
mSysStatInc(sysStat, n)
- // On 64-bit, we don't actually have v reserved, so tread carefully.
- if !reserved {
- p, err := mmap_fixed(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
- if err == _ENOMEM {
- throw("runtime: out of memory")
- }
- if p != v || err != 0 {
- print("runtime: address space conflict: map(", v, ") = ", p, " (err ", err, ")\n")
- throw("runtime: address space conflict")
- }
- return
- }
-
p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
if err == _ENOMEM {
throw("runtime: out of memory")