path: root/src/runtime/traceregion.go
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Simple not-in-heap bump-pointer traceRegion allocator.

package runtime

import (
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// traceRegionAlloc is a thread-safe region allocator.
// It holds a linked list of traceRegionAllocBlock.
type traceRegionAlloc struct {
	lock     mutex                  // Protects the full list and installation of a new current block.
	dropping atomic.Bool            // For checking invariants.
	current  atomic.UnsafePointer   // *traceRegionAllocBlock; the block being bump-allocated from.
	full     *traceRegionAllocBlock // Exhausted blocks, linked via next.
}

// traceRegionAllocBlock is a block in traceRegionAlloc.
//
// traceRegionAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceRegionAllocBlocks do
// not need write barriers.
type traceRegionAllocBlock struct {
	_ sys.NotInHeap
	traceRegionAllocBlockHeader
	data [traceRegionAllocBlockData]byte
}

type traceRegionAllocBlockHeader struct {
	next *traceRegionAllocBlock
	off  atomic.Uintptr
}

// traceRegionAllocBlockData is the size of a block's data region, chosen so
// that a whole traceRegionAllocBlock, header included, occupies 64 KiB.
const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})

// alloc allocates an n-byte block. The block is always aligned to 8 bytes,
// regardless of platform. The common case is lock-free: a.lock is taken only
// to retire a full block and install a fresh one.
func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
	n = alignUp(n, 8)
	if n > traceRegionAllocBlockData {
		throw("traceRegion: alloc too large")
	}
	if a.dropping.Load() {
		throw("traceRegion: alloc with concurrent drop")
	}

	// Try to bump-pointer allocate into the current block.
	block := (*traceRegionAllocBlock)(a.current.Load())
	if block != nil {
		// Each concurrent caller gets a distinct [r-n, r) range from the
		// atomic add. If r overshoots the end of the block, the reservation
		// is abandoned and a replacement block is installed under the lock.
		r := block.off.Add(n)
		if r <= uintptr(len(block.data)) {
			return (*notInHeap)(unsafe.Pointer(&block.data[r-n]))
		}
	}

	// Try to install a new block.
	lock(&a.lock)

	// Check block again under the lock. Someone may
	// have gotten here first.
	block = (*traceRegionAllocBlock)(a.current.Load())
	if block != nil {
		r := block.off.Add(n)
		if r <= uintptr(len(block.data)) {
			unlock(&a.lock)
			return (*notInHeap)(unsafe.Pointer(&block.data[r-n]))
		}

		// Add the existing block to the full list.
		block.next = a.full
		a.full = block
	}

	// Allocate a new block.
	block = (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys))
	if block == nil {
		throw("traceRegion: out of memory")
	}

	// Allocate space for our current request, so we always make
	// progress.
	block.off.Store(n)
	x := (*notInHeap)(unsafe.Pointer(&block.data[0]))

	// Publish the new block.
	a.current.Store(unsafe.Pointer(block))
	unlock(&a.lock)
	return x
}

// drop frees all previously allocated memory and resets the allocator.
//
// drop is not safe to call concurrently with other calls to drop or with calls to alloc. The caller
// must ensure that it is not possible for anything else to be using the same structure.
func (a *traceRegionAlloc) drop() {
	a.dropping.Store(true)
	for a.full != nil {
		block := a.full
		a.full = block.next
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
	}
	if current := a.current.Load(); current != nil {
		sysFree(current, unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
		a.current.Store(nil)
	}
	a.dropping.Store(false)
}
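
// The following is an illustrative sketch only, not part of the runtime: it
// shows the intended alloc/drop lifecycle of a traceRegionAlloc. The 16-byte
// record size and its contents are made up for the example.
//
//	var a traceRegionAlloc
//
//	// Reserve 16 bytes and fill them in. The returned memory lives outside
//	// the GC'd heap, so it must never hold heap pointers.
//	p := a.alloc(16)
//	buf := (*[16]byte)(unsafe.Pointer(p))
//	copy(buf[:], "sixteen bytes..!")
//
//	// Once nothing can be allocating from or reading the regions anymore
//	// (e.g. the trace generation has been flushed), free everything at once.
//	a.drop()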