diff options
author | Austin Clements <austin@google.com> | 2017-12-30 19:35:46 -0500 |
---|---|---|
committer | Austin Clements <austin@google.com> | 2018-02-15 21:12:24 +0000 |
commit | 51ae88ee2f9a1063c272a497527751d786291c89 (patch) | |
tree | 94baf271ae262909ee77c0a475fa538b4808e5d2 /src/runtime/mem_bsd.go | |
parent | 2b415549b813ba36caafa34fc34d72e47ee8335c (diff) | |
download | go-51ae88ee2f9a1063c272a497527751d786291c89.tar.gz go-51ae88ee2f9a1063c272a497527751d786291c89.zip |
runtime: remove non-reserved heap logic
Currently large sysReserve calls on some OSes don't actually reserve
the memory, but just check that it can be reserved. This was important
when we called sysReserve to "reserve" many gigabytes for the heap up
front, but now that we map memory in small increments as we need it,
this complication is no longer necessary.
This has one curious side benefit: currently, on Linux, allocations
that are large enough to be rejected by mmap wind up freezing the
application for a long time before it panics. This happens because
sysReserve doesn't reserve the memory, so sysMap calls mmap_fixed,
which calls mmap, which fails because the mapping is too large.
However, mmap_fixed doesn't inspect *why* mmap fails, so it falls back
to probing every page in the desired region individually with mincore
before performing an (otherwise dangerous) MAP_FIXED mapping, which
will also fail. This takes a long time for a large region. Now this
logic is gone, so the mmap failure leads to an immediate panic.
Updates #10460.
Change-Id: I8efe88c611871cdb14f99fadd09db83e0161ca2e
Reviewed-on: https://go-review.googlesource.com/85888
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
Diffstat (limited to 'src/runtime/mem_bsd.go')
-rw-r--r-- | src/runtime/mem_bsd.go | 36 |
1 file changed, 2 insertions(+), 34 deletions(-)
diff --git a/src/runtime/mem_bsd.go b/src/runtime/mem_bsd.go index 23872b9a63..cc70e806ea 100644 --- a/src/runtime/mem_bsd.go +++ b/src/runtime/mem_bsd.go @@ -7,7 +7,6 @@ package runtime import ( - "runtime/internal/sys" "unsafe" ) @@ -42,51 +41,20 @@ func sysFault(v unsafe.Pointer, n uintptr) { mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0) } -func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer { - // On 64-bit, people with ulimit -v set complain if we reserve too - // much address space. Instead, assume that the reservation is okay - // and check the assumption in SysMap. - if sys.PtrSize == 8 && uint64(n) > 1<<32 || sys.GoosNacl != 0 { - *reserved = false - return v - } - +func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0) if err != 0 { return nil } - *reserved = true return p } const _sunosEAGAIN = 11 const _ENOMEM = 12 -func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) { +func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) { mSysStatInc(sysStat, n) - // On 64-bit, we don't actually have v reserved, so tread carefully. - if !reserved { - flags := int32(_MAP_ANON | _MAP_PRIVATE) - if GOOS == "dragonfly" { - // TODO(jsing): For some reason DragonFly seems to return - // memory at a different address than we requested, even when - // there should be no reason for it to do so. This can be - // avoided by using MAP_FIXED, but I'm not sure we should need - // to do this - we do not on other platforms. 
- flags |= _MAP_FIXED - } - p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, flags, -1, 0) - if err == _ENOMEM || (GOOS == "solaris" && err == _sunosEAGAIN) { - throw("runtime: out of memory") - } - if p != v || err != 0 { - print("runtime: address space conflict: map(", v, ") = ", p, "(err ", err, ")\n") - throw("runtime: address space conflict") - } - return - } - p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) if err == _ENOMEM || (GOOS == "solaris" && err == _sunosEAGAIN) { throw("runtime: out of memory") |