author     doujiang24 <doujiang24@gmail.com>  2023-03-24 01:30:46 +0000
committer  Cherry Mui <cherryyz@google.com>   2023-03-24 16:00:24 +0000
commit     ef0dedce87b505f6115dd39b4329da9e9231ff95 (patch)
tree       790632c8421af9f34fd84b70b2802ffb964a0533 /misc
parent     a6c382eaa8eaa611d71232aa4d5391b56a5c2693 (diff)
runtime/cgo: store M for C-created thread in pthread key
Invoking a Go function from a C-created thread requires acquiring an extra M via needm, and needm and dropm are expensive because of the signal-related syscalls they perform. This change no longer drops the M when returning to C: the extra M stays bound to the C thread until the thread exits, avoiding needm and dropm on every C-to-Go call. The M is dropped only when the C thread exits, so the extra M does not leak.

When invoking a Go function from C: allocate a pthread key with pthread_key_create, only once per shared object, and register a thread-exit-time destructor. Store the g0 of the current M into the thread-specific value of the pthread key, only once per C thread, so that the destructor puts the extra M back onto the extra M list when the C thread exits.

When returning back to C: skip dropm in cgocallback when the pthread key has been created, so that the extra M is reused the next time a Go function is invoked from C.

This is purely a performance optimization. The old behavior, in which needm and dropm happen on each cgo call, is still correct, and it is kept on systems that have cgo but no pthreads, such as Windows.

The optimization is significant; the exact gain depends on the OS and CPU, but a simple Go function call from a C thread is roughly 10x faster. Some results for the newly added BenchmarkCGoInCThread:

1. 28x faster, from 3395 ns/op to 121 ns/op, on darwin with an Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz
2. 6.5x faster, from 1495 ns/op to 230 ns/op, on Linux with an Intel(R) Xeon(R) CPU E5-2630 0 @ 2.30GHz

Fixes #51676

Change-Id: I380702fe2f9b6b401b2d6f04b0aba990f4b9ee6c
GitHub-Last-Rev: 93dc64ad98e5583372e41f65ee4b7ab78b5aff51
GitHub-Pull-Request: golang/go#51679
Reviewed-on: https://go-review.googlesource.com/c/go/+/392854
Reviewed-by: Ian Lance Taylor <iant@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: thepudds <thepudds1460@gmail.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
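As an aside, the pthread-key mechanism described above can be illustrated with a minimal C sketch, assuming POSIX threads. This is not the actual runtime/cgo code, and the names (make_key, bind_value_to_thread, thread_exit_destructor) are hypothetical: the key is created once per process, each thread stores a per-thread value the first time it calls into Go, and the registered destructor runs at thread exit, which is the hook that lets the runtime return the extra M to the extra M list.

#include <pthread.h>
#include <stdio.h>

static pthread_key_t mkey;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;

// Runs automatically at thread exit for every thread that stored a
// non-NULL value in mkey. In the real runtime this is where the extra M
// would be put back on the extra M list.
static void
thread_exit_destructor(void *value)
{
	printf("thread exiting, releasing %p\n", value);
}

// Create the key and register the destructor exactly once.
static void
make_key(void)
{
	pthread_key_create(&mkey, thread_exit_destructor);
}

// Called on every C-to-Go transition in this sketch; the per-thread
// store happens only the first time a given thread passes through.
static void
bind_value_to_thread(void *value)
{
	pthread_once(&key_once, make_key);
	if (pthread_getspecific(mkey) == NULL)
		pthread_setspecific(mkey, value);
}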
Diffstat (limited to 'misc')
-rw-r--r--  misc/cgo/test/cgo_test.go        |  7
-rw-r--r--  misc/cgo/test/cthread_unix.c     | 24
-rw-r--r--  misc/cgo/test/cthread_windows.c  | 22
-rw-r--r--  misc/cgo/test/testx.go           | 14
4 files changed, 64 insertions(+), 3 deletions(-)
diff --git a/misc/cgo/test/cgo_test.go b/misc/cgo/test/cgo_test.go
index 5b298954f5..0c3980c12d 100644
--- a/misc/cgo/test/cgo_test.go
+++ b/misc/cgo/test/cgo_test.go
@@ -104,6 +104,7 @@ func TestThreadLock(t *testing.T) { testThreadLockFunc(t) }
func TestUnsignedInt(t *testing.T) { testUnsignedInt(t) }
func TestZeroArgCallback(t *testing.T) { testZeroArgCallback(t) }
-func BenchmarkCgoCall(b *testing.B)     { benchCgoCall(b) }
-func BenchmarkGoString(b *testing.B)    { benchGoString(b) }
-func BenchmarkCGoCallback(b *testing.B) { benchCallback(b) }
+func BenchmarkCgoCall(b *testing.B)      { benchCgoCall(b) }
+func BenchmarkGoString(b *testing.B)     { benchGoString(b) }
+func BenchmarkCGoCallback(b *testing.B)  { benchCallback(b) }
+func BenchmarkCGoInCThread(b *testing.B) { benchCGoInCthread(b) }
diff --git a/misc/cgo/test/cthread_unix.c b/misc/cgo/test/cthread_unix.c
index 247d636d06..13623254a9 100644
--- a/misc/cgo/test/cthread_unix.c
+++ b/misc/cgo/test/cthread_unix.c
@@ -32,3 +32,27 @@ doAdd(int max, int nthread)
for(i=0; i<nthread; i++)
pthread_join(thread_id[i], 0);
}
+
+static void*
+goDummyCallbackThread(void* p)
+{
+ int i, max;
+
+ max = *(int*)p;
+ for(i=0; i<max; i++)
+ goDummy();
+ return NULL;
+}
+
+int
+callGoInCThread(int max)
+{
+ pthread_t thread;
+
+ if (pthread_create(&thread, NULL, goDummyCallbackThread, (void*)(&max)) != 0)
+ return -1;
+ if (pthread_join(thread, NULL) != 0)
+ return -1;
+
+ return max;
+}
diff --git a/misc/cgo/test/cthread_windows.c b/misc/cgo/test/cthread_windows.c
index 3a62ddd373..4e52209dee 100644
--- a/misc/cgo/test/cthread_windows.c
+++ b/misc/cgo/test/cthread_windows.c
@@ -35,3 +35,25 @@ doAdd(int max, int nthread)
CloseHandle((HANDLE)thread_id[i]);
}
}
+
+__stdcall
+static unsigned int
+goDummyCallbackThread(void* p)
+{
+ int i, max;
+
+ max = *(int*)p;
+ for(i=0; i<max; i++)
+ goDummy();
+ return 0;
+}
+
+int
+callGoInCThread(int max)
+{
+ uintptr_t thread_id;
+ thread_id = _beginthreadex(0, 0, goDummyCallbackThread, &max, 0, 0);
+ WaitForSingleObject((HANDLE)thread_id, INFINITE);
+ CloseHandle((HANDLE)thread_id);
+ return max;
+}
diff --git a/misc/cgo/test/testx.go b/misc/cgo/test/testx.go
index 6a8e97ddf3..0e2a51a522 100644
--- a/misc/cgo/test/testx.go
+++ b/misc/cgo/test/testx.go
@@ -24,6 +24,7 @@ import (
/*
// threads
extern void doAdd(int, int);
+extern int callGoInCThread(int);
// issue 1328
void IntoC(void);
@@ -146,6 +147,10 @@ func Add(x int) {
*p = 2
}
+//export goDummy
+func goDummy() {
+}
+
func testCthread(t *testing.T) {
if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" {
t.Skip("the iOS exec wrapper is unable to properly handle the panic from Add")
@@ -159,6 +164,15 @@ func testCthread(t *testing.T) {
}
}
+// Benchmark measuring the overhead of calling from C into Go on a C-created
+// thread: create a new C thread and invoke a Go function repeatedly from it.
+func benchCGoInCthread(b *testing.B) {
+ n := C.callGoInCThread(C.int(b.N))
+ if int(n) != b.N {
+ b.Fatal("mismatched loop count")
+ }
+}
+
// issue 1328
//export BackIntoGo