diff options
author | Dmitry Vyukov <dvyukov@google.com> | 2016-02-26 21:57:16 +0100 |
---|---|---|
committer | Dmitry Vyukov <dvyukov@google.com> | 2016-05-03 11:00:43 +0000 |
commit | caa21475328999c1cd108b71ceb6efb7f4cf8fc4 (patch) | |
tree | 9555dae9965819297a5f490ca45c6c4c8cf2c1e8 /src/runtime/race.go | |
parent | fcd7c02c70a110c6f6dbac30ad4ac3eb435ac3fd (diff) | |
download | go-caa21475328999c1cd108b71ceb6efb7f4cf8fc4.tar.gz go-caa21475328999c1cd108b71ceb6efb7f4cf8fc4.zip |
runtime: per-P contexts for race detector
Race runtime also needs local malloc caches and currently uses
a mix of per-OS-thread and per-goroutine caches. This leads to
increased memory consumption. But more importantly cache of
synchronization objects is per-goroutine and we don't always
have goroutine context when freeing memory in GC. As a result
synchronization object descriptors leak (more precisely, they
can be reused if another synchronization object is recreated
at the same address, but it does not always help). For example,
the added BenchmarkSyncLeak has effectively runaway memory
consumption (based on a real long running server).
This change updates race runtime with support for per-P contexts.
BenchmarkSyncLeak now stabilizes at ~1GB memory consumption.
Long term, this will allow us to remove race runtime dependency
on glibc (as malloc is the main cornerstone).
I've also implemented a different scheme to pass P context to
race runtime: scheduler notified race runtime about association
between G and P by calling procwire(g, p)/procunwire(g, p).
But it turned out to be very messy as we have lots of places
where the association changes (e.g. syscalls). So I dropped it
in favor of the current scheme: race runtime asks scheduler
about the current P.
Fixes #14533
Change-Id: Iad10d2f816a44affae1b9fed446b3580eafd8c69
Reviewed-on: https://go-review.googlesource.com/19970
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Run-TryBot: Dmitry Vyukov <dvyukov@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Diffstat (limited to 'src/runtime/race.go')
-rw-r--r-- | src/runtime/race.go | 89 |
1 files changed, 76 insertions, 13 deletions
diff --git a/src/runtime/race.go b/src/runtime/race.go index beca47ed21..ecd68d80ce 100644 --- a/src/runtime/race.go +++ b/src/runtime/race.go @@ -58,7 +58,7 @@ func racereadpc(addr unsafe.Pointer, callpc, pc uintptr) //go:noescape func racewritepc(addr unsafe.Pointer, callpc, pc uintptr) -type symbolizeContext struct { +type symbolizeCodeContext struct { pc uintptr fn *byte file *byte @@ -70,8 +70,27 @@ type symbolizeContext struct { var qq = [...]byte{'?', '?', 0} var dash = [...]byte{'-', 0} +const ( + raceGetProcCmd = iota + raceSymbolizeCodeCmd + raceSymbolizeDataCmd +) + // Callback from C into Go, runs on g0. -func racesymbolize(ctx *symbolizeContext) { +func racecallback(cmd uintptr, ctx unsafe.Pointer) { + switch cmd { + case raceGetProcCmd: + throw("should have been handled by racecallbackthunk") + case raceSymbolizeCodeCmd: + raceSymbolizeCode((*symbolizeCodeContext)(ctx)) + case raceSymbolizeDataCmd: + raceSymbolizeData((*symbolizeDataContext)(ctx)) + default: + throw("unknown command") + } +} + +func raceSymbolizeCode(ctx *symbolizeCodeContext) { f := findfunc(ctx.pc) if f == nil { ctx.fn = &qq[0] @@ -91,6 +110,26 @@ func racesymbolize(ctx *symbolizeContext) { return } +type symbolizeDataContext struct { + addr uintptr + heap uintptr + start uintptr + size uintptr + name *byte + file *byte + line uintptr + res uintptr +} + +func raceSymbolizeData(ctx *symbolizeDataContext) { + if _, x, n := findObject(unsafe.Pointer(ctx.addr)); x != nil { + ctx.heap = 1 + ctx.start = uintptr(x) + ctx.size = n + ctx.res = 1 + } +} + // Race runtime functions called via runtime·racecall. 
//go:linkname __tsan_init __tsan_init var __tsan_init byte @@ -98,6 +137,12 @@ var __tsan_init byte //go:linkname __tsan_fini __tsan_fini var __tsan_fini byte +//go:linkname __tsan_proc_create __tsan_proc_create +var __tsan_proc_create byte + +//go:linkname __tsan_proc_destroy __tsan_proc_destroy +var __tsan_proc_destroy byte + //go:linkname __tsan_map_shadow __tsan_map_shadow var __tsan_map_shadow byte @@ -113,6 +158,9 @@ var __tsan_go_end byte //go:linkname __tsan_malloc __tsan_malloc var __tsan_malloc byte +//go:linkname __tsan_free __tsan_free +var __tsan_free byte + //go:linkname __tsan_acquire __tsan_acquire var __tsan_acquire byte @@ -131,11 +179,14 @@ var __tsan_go_ignore_sync_end byte // Mimic what cmd/cgo would do. //go:cgo_import_static __tsan_init //go:cgo_import_static __tsan_fini +//go:cgo_import_static __tsan_proc_create +//go:cgo_import_static __tsan_proc_destroy //go:cgo_import_static __tsan_map_shadow //go:cgo_import_static __tsan_finalizer_goroutine //go:cgo_import_static __tsan_go_start //go:cgo_import_static __tsan_go_end //go:cgo_import_static __tsan_malloc +//go:cgo_import_static __tsan_free //go:cgo_import_static __tsan_acquire //go:cgo_import_static __tsan_release //go:cgo_import_static __tsan_release_merge @@ -175,7 +226,7 @@ func racefuncenter(uintptr) func racefuncexit() func racereadrangepc1(uintptr, uintptr, uintptr) func racewriterangepc1(uintptr, uintptr, uintptr) -func racesymbolizethunk(uintptr) +func racecallbackthunk(uintptr) // racecall allows calling an arbitrary function f from C race runtime // with up to 4 uintptr arguments. 
@@ -189,14 +240,13 @@ func isvalidaddr(addr unsafe.Pointer) bool { } //go:nosplit -func raceinit() uintptr { +func raceinit() (gctx, pctx uintptr) { // cgo is required to initialize libc, which is used by race runtime if !iscgo { throw("raceinit: race build must use cgo") } - var racectx uintptr - racecall(&__tsan_init, uintptr(unsafe.Pointer(&racectx)), funcPC(racesymbolizethunk), 0, 0) + racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0) // Round data segment to page boundaries, because it's used in mmap(). start := ^uintptr(0) @@ -230,7 +280,7 @@ func raceinit() uintptr { racedatastart = start racedataend = start + size - return racectx + return } //go:nosplit @@ -239,6 +289,18 @@ func racefini() { } //go:nosplit +func raceproccreate() uintptr { + var ctx uintptr + racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0) + return ctx +} + +//go:nosplit +func raceprocdestroy(ctx uintptr) { + racecall(&__tsan_proc_destroy, ctx, 0, 0, 0) +} + +//go:nosplit func racemapshadow(addr unsafe.Pointer, size uintptr) { if racearenastart == 0 { racearenastart = uintptr(addr) @@ -251,7 +313,12 @@ func racemapshadow(addr unsafe.Pointer, size uintptr) { //go:nosplit func racemalloc(p unsafe.Pointer, sz uintptr) { - racecall(&__tsan_malloc, uintptr(p), sz, 0, 0) + racecall(&__tsan_malloc, 0, 0, uintptr(p), sz) +} + +//go:nosplit +func racefree(p unsafe.Pointer, sz uintptr) { + racecall(&__tsan_free, uintptr(p), sz, 0, 0) } //go:nosplit @@ -323,11 +390,7 @@ func raceacquireg(gp *g, addr unsafe.Pointer) { //go:nosplit func racerelease(addr unsafe.Pointer) { - _g_ := getg() - if _g_.raceignore != 0 || !isvalidaddr(addr) { - return - } - racereleaseg(_g_, addr) + racereleaseg(getg(), addr) } //go:nosplit |