From bbee5236cd64a11a2bfd194eb9f809f51cae0870 Mon Sep 17 00:00:00 2001
From: Michael Munday
Date: Mon, 10 Aug 2020 08:01:21 -0700
Subject: [release-branch.go1.15] cmd/internal/obj: fix inline marker issue on
 s390x

The optimization that replaces inline markers with pre-existing
instructions assumes that 'Prog' values produced by the compiler are
still reachable after the assembler has run. This was not true on
s390x where the assembler was removing NOP instructions from the
linked list of 'Prog' values. This led to broken inlining data which
in turn caused an infinite loop in the runtime traceback code.

Fix this by stopping the s390x assembler backend removing NOP values.
It does not make any difference to the output of the assembler because
NOP instructions are 0 bytes long anyway.

Note: compiler check omitted from backport to reduce risk of change.

Fixes #40693.

Change-Id: I4eb570de13165cde342d5fb2ee3218945ddf4b52
Reviewed-on: https://go-review.googlesource.com/c/go/+/248478
Run-TryBot: Michael Munday
TryBot-Result: Gobot Gobot
Reviewed-by: Dmitri Shuralyov
Reviewed-by: Keith Randall
---
 src/cmd/internal/obj/s390x/objz.go | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go
index b14dc810fa..ef6335d849 100644
--- a/src/cmd/internal/obj/s390x/objz.go
+++ b/src/cmd/internal/obj/s390x/objz.go
@@ -283,17 +283,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 			ACMPUBNE:
 			q = p
 			p.Mark |= BRANCH
-			if p.Pcond != nil {
-				q := p.Pcond
-				for q.As == obj.ANOP {
-					q = q.Link
-					p.Pcond = q
-				}
-			}
-
-		case obj.ANOP:
-			q.Link = p.Link /* q is non-nop */
-			p.Link.Mark |= p.Mark

 		default:
 			q = p
--
cgit v1.2.3-54-g00ecf


From b1be1428dc7d988c2be9006b1cbdf3e513d299b6 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Wed, 9 Sep 2020 16:52:18 +0000
Subject: [release-branch.go1.15] runtime: fix ReadMemStatsSlow's and
 CheckScavengedBits' chunk iteration

Both ReadMemStatsSlow and CheckScavengedBits iterate over the page
allocator's chunks but don't actually check if they exist. During the
development process the chunks index became sparse, so now this was a
possibility. If the runtime tests' heap is sparse we might end up
segfaulting in either one of these functions, though this will
generally be very rare.

The pattern here to return nil for a nonexistent chunk is also useful
elsewhere, so this change introduces tryChunkOf which won't throw, but
might return nil. It also updates the documentation of chunkOf.

For #41296.
Fixes #41317.
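To illustrate the guard pattern in isolation, here is a standalone
sketch (made-up types, not the runtime's real index; the actual
tryChunkOf is in the diff below):

	package main

	import "fmt"

	// sparseIndex models the page allocator's two-level chunk index:
	// first-level entries are nil for address ranges never mapped.
	type sparseIndex struct {
		l1 [4]*[4]int
	}

	// tryChunkOf returns nil instead of faulting when the first-level
	// entry is missing, mirroring the new runtime helper.
	func (ix *sparseIndex) tryChunkOf(ci int) *int {
		l2 := ix.l1[ci/4]
		if l2 == nil {
			return nil
		}
		return &l2[ci%4]
	}

	func main() {
		var ix sparseIndex
		ix.l1[2] = new([4]int) // only one region is mapped
		for ci := 0; ci < 16; ci++ {
			if c := ix.tryChunkOf(ci); c == nil {
				continue // skip nonexistent chunks, as the fixed loops do
			}
			fmt.Println("chunk", ci, "is mapped")
		}
	}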
Change-Id: Id5ae0ca3234480de1724fdf2e3677eeedcf76fa0
Reviewed-on: https://go-review.googlesource.com/c/go/+/253777
Run-TryBot: Michael Knyszek
Reviewed-by: Keith Randall
TryBot-Result: Gobot Gobot
(cherry picked from commit 34835df04891a1d54394888b763af88f9476101d)
Reviewed-on: https://go-review.googlesource.com/c/go/+/253917
---
 src/runtime/export_test.go | 17 ++++++++++-------
 src/runtime/mpagealloc.go  | 13 +++++++++++++
 2 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 5ab03f3f99..e43813bcc4 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -360,7 +360,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		}

 		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
+			chunk := mheap_.pages.tryChunkOf(i)
+			if chunk == nil {
+				continue
+			}
+			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
 			slow.HeapReleased += uint64(pg) * pageSize
 		}
 		for _, p := range allp {
@@ -758,11 +762,7 @@ func (p *PageAlloc) InUse() []AddrRange {
 // Returns nil if the PallocData's L2 is missing.
 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
 	ci := chunkIdx(i)
-	l2 := (*pageAlloc)(p).chunks[ci.l1()]
-	if l2 == nil {
-		return nil
-	}
-	return (*PallocData)(&l2[ci.l2()])
+	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
 }

 // AddrRange represents a range over addresses.
@@ -902,7 +902,10 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
 	lock(&mheap_.lock)
 chunkLoop:
 	for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-		chunk := mheap_.pages.chunkOf(i)
+		chunk := mheap_.pages.tryChunkOf(i)
+		if chunk == nil {
+			continue
+		}
 		for j := 0; j < pallocChunkPages/64; j++ {
 			// Run over each 64-bit bitmap section and ensure
 			// scavenged is being cleared properly on allocation.
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 8b3c62c375..c90a6378bd 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -326,7 +326,20 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
 	s.scav.scavLWM = maxSearchAddr
 }

+// tryChunkOf returns the bitmap data for the given chunk.
+//
+// Returns nil if the chunk data has not been mapped.
+func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+	l2 := s.chunks[ci.l1()]
+	if l2 == nil {
+		return nil
+	}
+	return &l2[ci.l2()]
+}
+
 // chunkOf returns the chunk at the given chunk index.
+//
+// The chunk index must be valid or this method may throw.
 func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
 	return &s.chunks[ci.l1()][ci.l2()]
 }
--
cgit v1.2.3-54-g00ecf


From fa01c8a14eeb760c58c5cd09afec327fdd392b2c Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Mon, 21 Sep 2020 17:05:11 -0700
Subject: [release-branch.go1.15] internal/poll: adjust ignoringEINTR to avoid
 slice escape

The 1.15 compiler is not quite smart enough to see that the byte slice
passed to ignoringEINTR does not escape. This ripples back up to user
code which would see a byte slice passed to os.(*File).Write escape,
which did not happen in 1.14.

Rather than backport some moderately complex compiler fixes, rewrite
the code slightly so that the 1.15 compiler is able to see that the
slice does not escape. This is not a backport from tip, where the code
is already different.

The test for this will be on tip, where we will most likely change the
compiler to understand this kind of code.
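The shape of the rewrite can be shown in isolation. This is a
Unix-only sketch under assumed names (ignoringEINTROld is a
hypothetical stand-in for the old form; the real changes to
internal/poll are in the diff below):

	package main

	import (
		"fmt"
		"os"
		"syscall"
	)

	// Old shape: p flows through an indirect call, so the 1.15
	// compiler conservatively concludes that p escapes.
	func ignoringEINTROld(fn func(fd int, p []byte) (int, error), fd int, p []byte) (int, error) {
		for {
			n, err := fn(fd, p)
			if err != syscall.EINTR {
				return n, err
			}
		}
	}

	// New shape: p is only captured by a closure, which the compiler
	// can analyze well enough to keep p from escaping.
	func ignoringEINTR(fn func() (int, error)) (int, error) {
		for {
			n, err := fn()
			if err != syscall.EINTR {
				return n, err
			}
		}
	}

	func main() {
		f, _ := os.Open(os.DevNull)
		defer f.Close()
		fd := int(f.Fd())
		p := make([]byte, 8)
		n, err := ignoringEINTR(func() (int, error) { return syscall.Read(fd, p) })
		fmt.Println(n, err)
	}

Compiling with -gcflags=-m prints the compiler's escape decisions for
each shape.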
Fixes #41543
For #41474

Change-Id: I6c78164229fea7794e7edba512bfd7034a0b91c3
Reviewed-on: https://go-review.googlesource.com/c/go/+/256418
Trust: Ian Lance Taylor
Reviewed-by: Matthew Dempsky
---
 src/internal/poll/fd_unix.go          | 12 ++++++------
 src/runtime/trace/trace_stack_test.go |  1 +
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/src/internal/poll/fd_unix.go b/src/internal/poll/fd_unix.go
index 4872fa9851..61ffa82b14 100644
--- a/src/internal/poll/fd_unix.go
+++ b/src/internal/poll/fd_unix.go
@@ -152,7 +152,7 @@ func (fd *FD) Read(p []byte) (int, error) {
 		p = p[:maxRW]
 	}
 	for {
-		n, err := ignoringEINTR(syscall.Read, fd.Sysfd, p)
+		n, err := ignoringEINTR(func() (int, error) { return syscall.Read(fd.Sysfd, p) })
 		if err != nil {
 			n = 0
 			if err == syscall.EAGAIN && fd.pd.pollable() {
@@ -264,7 +264,7 @@ func (fd *FD) Write(p []byte) (int, error) {
 		if fd.IsStream && max-nn > maxRW {
 			max = nn + maxRW
 		}
-		n, err := ignoringEINTR(syscall.Write, fd.Sysfd, p[nn:max])
+		n, err := ignoringEINTR(func() (int, error) { return syscall.Write(fd.Sysfd, p[nn:max]) })
 		if n > 0 {
 			nn += n
 		}
@@ -423,7 +423,7 @@ func (fd *FD) ReadDirent(buf []byte) (int, error) {
 	}
 	defer fd.decref()
 	for {
-		n, err := ignoringEINTR(syscall.ReadDirent, fd.Sysfd, buf)
+		n, err := ignoringEINTR(func() (int, error) { return syscall.ReadDirent(fd.Sysfd, buf) })
 		if err != nil {
 			n = 0
 			if err == syscall.EAGAIN && fd.pd.pollable() {
@@ -514,7 +514,7 @@ func (fd *FD) WriteOnce(p []byte) (int, error) {
 		return 0, err
 	}
 	defer fd.writeUnlock()
-	return ignoringEINTR(syscall.Write, fd.Sysfd, p)
+	return ignoringEINTR(func() (int, error) { return syscall.Write(fd.Sysfd, p) })
 }

 // RawRead invokes the user-defined function f for a read operation.
@@ -562,9 +562,9 @@ func (fd *FD) RawWrite(f func(uintptr) bool) error {
 // installed without setting SA_RESTART. None of these are the common case,
 // but there are enough of them that it seems that we can't avoid
 // an EINTR loop.
-func ignoringEINTR(fn func(fd int, p []byte) (int, error), fd int, p []byte) (int, error) {
+func ignoringEINTR(fn func() (int, error)) (int, error) {
 	for {
-		n, err := fn(fd, p)
+		n, err := fn()
 		if err != syscall.EINTR {
 			return n, err
 		}
diff --git a/src/runtime/trace/trace_stack_test.go b/src/runtime/trace/trace_stack_test.go
index cfc0419b72..f856fdcd01 100644
--- a/src/runtime/trace/trace_stack_test.go
+++ b/src/runtime/trace/trace_stack_test.go
@@ -252,6 +252,7 @@ func TestTraceSymbolize(t *testing.T) {
 		{trace.EvGoSysCall, []frame{
 			{"syscall.read", 0},
 			{"syscall.Read", 0},
+			{"internal/poll.(*FD).Read.func1", 0},
 			{"internal/poll.ignoringEINTR", 0},
 			{"internal/poll.(*FD).Read", 0},
 			{"os.(*File).read", 0},
--
cgit v1.2.3-54-g00ecf


From fa262e61fd8e123c53023c122ab6330a8ae28aa3 Mon Sep 17 00:00:00 2001
From: Keith Randall
Date: Thu, 24 Sep 2020 14:25:21 -0700
Subject: [release-branch.go1.15] cmd/compile: prevent 387+float32+pie from
 clobbering registers

The 387 port needs to load a floating-point control word from a global
location to implement float32 arithmetic. When compiling with -pie,
loading that control word clobbers an integer register. If that
register had something important in it, boom.

Fix by using LEAL to materialize the address of the global location
first. LEAL with -pie works because the destination register is used
as the scratch register.

387 support is about to go away (#40255), so this will need to be
backported to have any effect.

No test. I have one, but it requires building with -pie, which
requires cgo.
Our testing infrastructure doesn't make that easy. Not worth it for a
port which is about to vanish.

Fixes #41620

Change-Id: I140f9fc8fdce4e74a52c2c046e2bd30ae476d295
Reviewed-on: https://go-review.googlesource.com/c/go/+/257277
Run-TryBot: Keith Randall
Reviewed-by: Cherry Zhang
Reviewed-by: Matthew Dempsky
TryBot-Result: Go Bot
Trust: Keith Randall
(cherry picked from commit ea106cc07ac73110a8a25fcc5aef07b283159db0)
Reviewed-on: https://go-review.googlesource.com/c/go/+/257207
---
 src/cmd/compile/internal/x86/387.go | 68 ++++++++++++++++++++++++++-----------
 1 file changed, 48 insertions(+), 20 deletions(-)

diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go
index 796aa82f19..594adb2cd5 100644
--- a/src/cmd/compile/internal/x86/387.go
+++ b/src/cmd/compile/internal/x86/387.go
@@ -139,12 +139,18 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
 		// Set precision if needed. 64 bits is the default.
 		switch v.Op {
 		case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
-			p := s.Prog(x86.AFSTCW)
+			// Save AX so we can use it as scratch space.
+			p := s.Prog(x86.AMOVL)
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = x86.REG_AX
 			s.AddrScratch(&p.To)
-			p = s.Prog(x86.AFLDCW)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Sym = gc.ControlWord32
+			// Install a 32-bit version of the control word.
+			installControlWord(s, gc.ControlWord32, x86.REG_AX)
+			// Restore AX.
+			p = s.Prog(x86.AMOVL)
+			s.AddrScratch(&p.From)
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = x86.REG_AX
 		}

 		var op obj.As
@@ -167,8 +173,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
 		// Restore precision if needed.
 		switch v.Op {
 		case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
-			p := s.Prog(x86.AFLDCW)
-			s.AddrScratch(&p.From)
+			restoreControlWord(s)
 		}

 	case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
@@ -225,19 +230,11 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
 	case ssa.Op386CVTTSD2SL, ssa.Op386CVTTSS2SL:
 		push(s, v.Args[0])

-		// Save control word.
-		p := s.Prog(x86.AFSTCW)
-		s.AddrScratch(&p.To)
-		p.To.Offset += 4
-
-		// Load control word which truncates (rounds towards zero).
-		p = s.Prog(x86.AFLDCW)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_EXTERN
-		p.From.Sym = gc.ControlWord64trunc
+		installControlWord(s, gc.ControlWord64trunc, v.Reg())

 		// Now do the conversion.
-		p = s.Prog(x86.AFMOVLP)
+		p := s.Prog(x86.AFMOVLP)
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = x86.REG_F0
 		s.AddrScratch(&p.To)
@@ -247,9 +244,7 @@
 		p.To.Reg = v.Reg()

 		// Restore control word.
-		p = s.Prog(x86.AFLDCW)
-		s.AddrScratch(&p.From)
-		p.From.Offset += 4
+		restoreControlWord(s)

 	case ssa.Op386CVTSS2SD:
 		// float32 -> float64 is a nop
@@ -373,3 +368,36 @@ func ssaGenBlock387(s *gc.SSAGenState, b, next *ssa.Block) {

 	ssaGenBlock(s, b, next)
 }
+
+// installControlWord saves the current floating-point control
+// word and installs a new one loaded from cw.
+// scratchReg must be an unused register.
+// This call must be paired with restoreControlWord.
+// Bytes 4-5 of the scratch space (s.AddrScratch) are used between
+// this call and restoreControlWord.
+func installControlWord(s *gc.SSAGenState, cw *obj.LSym, scratchReg int16) {
+	// Save current control word.
+	p := s.Prog(x86.AFSTCW)
+	s.AddrScratch(&p.To)
+	p.To.Offset += 4
+
+	// Materialize address of new control word.
+	// Note: this must be a separate instruction to handle PIE correctly.
+	// See issue 41503.
+	p = s.Prog(x86.ALEAL)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Sym = cw
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = scratchReg
+
+	// Load replacement control word.
+	p = s.Prog(x86.AFLDCW)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Reg = scratchReg
+}
+func restoreControlWord(s *gc.SSAGenState) {
+	p := s.Prog(x86.AFLDCW)
+	s.AddrScratch(&p.From)
+	p.From.Offset += 4
+}
--
cgit v1.2.3-54-g00ecf


From c25dd3f8325a5022a2573c71afd810f3fb604b08 Mon Sep 17 00:00:00 2001
From: "Bryan C. Mills"
Date: Tue, 18 Aug 2020 22:47:12 -0400
Subject: [release-branch.go1.15] testing: flush test summaries to stdout
 atomically when streaming output
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

While debugging #40771, I realized that the chatty printer should only
ever print to a single io.Writer (normally os.Stdout). The other
Writer implementations in the chain write to local buffers, but if we
wrote a test's output to a local buffer, then we did *not* write it to
stdout and we should not store it as the most recently logged test.

Because the chatty printer should only ever print to one place, it
shouldn't receive an io.Writer as an argument — rather, it shouldn't
be used at all for destinations other than the main output stream.

On the other hand, when we flush the output buffer to stdout in the
top-level flushToParent call, it is important that we not allow some
other test's output to intrude between the test summary header and the
remainder of the test's output. cmd/test2json doesn't know how to
parse such an intrusion, and it's confusing to humans too.

No test because I couldn't reproduce the user-reported error without
modifying the testing package. (This behavior seems to be very
sensitive to output size and/or goroutine scheduling.)

Fixes #40881
Updates #40771
Updates #38458

Change-Id: Ic19bf1d535672b096ba1c8583a3b74aab6d6d766
Reviewed-on: https://go-review.googlesource.com/c/go/+/249026
Run-TryBot: Bryan C. Mills
TryBot-Result: Gobot Gobot
Reviewed-by: Jay Conrod
Reviewed-by: Ian Lance Taylor
(cherry picked from commit 51c0bdc6d15dcd7f753c25896039ab41ac787ebb)
Reviewed-on: https://go-review.googlesource.com/c/go/+/252637
TryBot-Result: Go Bot
Trust: Bryan C. Mills
---
 src/testing/benchmark.go |  18 ++++---
 src/testing/sub_test.go  |  19 +++++--
 src/testing/testing.go   | 138 ++++++++++++++++++++++-------------------
 3 files changed, 88 insertions(+), 87 deletions(-)

diff --git a/src/testing/benchmark.go b/src/testing/benchmark.go
index 52766005bf..e9687bf26d 100644
--- a/src/testing/benchmark.go
+++ b/src/testing/benchmark.go
@@ -242,7 +242,7 @@ func (b *B) run1() bool {
 	if b.skipped {
 		tag = "SKIP"
 	}
-	if b.chatty && (len(b.output) > 0 || b.finished) {
+	if b.chatty != nil && (len(b.output) > 0 || b.finished) {
 		b.trimOutput()
 		fmt.Fprintf(b.w, "--- %s: %s\n%s", tag, b.name, b.output)
 	}
@@ -523,10 +523,9 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
 	}
 	main := &B{
 		common: common{
-			name:   "Main",
-			w:      os.Stdout,
-			chatty: *chatty,
-			bench:  true,
+			name:  "Main",
+			w:     os.Stdout,
+			bench: true,
 		},
 		importPath: importPath,
 		benchFunc: func(b *B) {
@@ -537,6 +536,9 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
 		benchTime: benchTime,
 		context:   ctx,
 	}
+	if Verbose() {
+		main.chatty = newChattyPrinter(main.w)
+	}
 	main.runN(1)
 	return !main.failed
 }
@@ -549,7 +551,7 @@ func (ctx *benchContext) processBench(b *B) {
 		benchName := benchmarkName(b.name, procs)

 		// If it's chatty, we've already printed this information.
-		if !b.chatty {
+		if b.chatty == nil {
 			fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
 		}
 		// Recompute the running time for all but the first iteration.
@@ -576,7 +578,7 @@ func (ctx *benchContext) processBench(b *B) {
 			continue
 		}
 		results := r.String()
-		if b.chatty {
+		if b.chatty != nil {
 			fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
 		}
 		if *benchmarkMemory || b.showAllocResult {
@@ -639,7 +641,7 @@ func (b *B) Run(name string, f func(b *B)) bool {
 		atomic.StoreInt32(&sub.hasSub, 1)
 	}

-	if b.chatty {
+	if b.chatty != nil {
 		labelsOnce.Do(func() {
 			fmt.Printf("goos: %s\n", runtime.GOOS)
 			fmt.Printf("goarch: %s\n", runtime.GOARCH)
diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go
index 8eb0084b1c..5ed3fc4afe 100644
--- a/src/testing/sub_test.go
+++ b/src/testing/sub_test.go
@@ -483,10 +483,12 @@ func TestTRun(t *T) {
 				signal: make(chan bool),
 				name:   "Test",
 				w:      buf,
-				chatty: tc.chatty,
 			},
 			context: ctx,
 		}
+		if tc.chatty {
+			root.chatty = newChattyPrinter(root.w)
+		}
 		ok := root.Run(tc.desc, tc.f)
 		ctx.release()
@@ -665,11 +667,13 @@ func TestBRun(t *T) {
 				signal: make(chan bool),
 				name:   "root",
 				w:      buf,
-				chatty: tc.chatty,
 			},
 			benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure.
 			benchTime: benchTimeFlag{d: 1 * time.Microsecond},
 		}
+		if tc.chatty {
+			root.chatty = newChattyPrinter(root.w)
+		}
 		root.runN(1)
 		if ok != !tc.failed {
 			t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, !tc.failed)
@@ -741,9 +745,13 @@ func TestParallelSub(t *T) {
 	}
 }

-type funcWriter func([]byte) (int, error)
+type funcWriter struct {
+	write func([]byte) (int, error)
+}

-func (fw funcWriter) Write(b []byte) (int, error) { return fw(b) }
+func (fw *funcWriter) Write(b []byte) (int, error) {
+	return fw.write(b)
+}

 func TestRacyOutput(t *T) {
 	var runs int32 // The number of running Writes
@@ -761,9 +769,10 @@ func TestRacyOutput(t *T) {

 	var wg sync.WaitGroup
 	root := &T{
-		common:  common{w: funcWriter(raceDetector), chatty: true},
+		common:  common{w: &funcWriter{raceDetector}},
 		context: newTestContext(1, newMatcher(regexp.MatchString, "", "")),
 	}
+	root.chatty = newChattyPrinter(root.w)
 	root.Run("", func(t *T) {
 		for i := 0; i < 100; i++ {
 			wg.Add(1)
diff --git a/src/testing/testing.go b/src/testing/testing.go
index bf83df8863..0055a4cfe2 100644
--- a/src/testing/testing.go
+++ b/src/testing/testing.go
@@ -325,7 +325,6 @@ var (
 	cpuListStr *string
 	parallel   *int
 	testlog    *string
-	printer    *testPrinter

 	haveExamples bool // are there examples?
@@ -335,55 +334,45 @@ var (
 	numFailed uint32 // number of test failures
 )

-type testPrinter struct {
-	chatty bool
-
+type chattyPrinter struct {
+	w          io.Writer
 	lastNameMu sync.Mutex // guards lastName
 	lastName   string     // last printed test name in chatty mode
 }

-func newTestPrinter(chatty bool) *testPrinter {
-	return &testPrinter{
-		chatty: chatty,
-	}
+func newChattyPrinter(w io.Writer) *chattyPrinter {
+	return &chattyPrinter{w: w}
 }

-func (p *testPrinter) Print(testName, out string) {
-	p.Fprint(os.Stdout, testName, out)
+// Updatef prints a message about the status of the named test to w.
+//
+// The formatted message must include the test name itself.
+func (p *chattyPrinter) Updatef(testName, format string, args ...interface{}) {
+	p.lastNameMu.Lock()
+	defer p.lastNameMu.Unlock()
+
+	// Since the message already implies an association with a specific new test,
+	// we don't need to check what the old test name was or log an extra CONT line
+	// for it. (We're updating it anyway, and the current message already includes
+	// the test name.)
+	p.lastName = testName
+	fmt.Fprintf(p.w, format, args...)
 }

-func (p *testPrinter) Fprint(w io.Writer, testName, out string) {
+// Printf prints a message, generated by the named test, that does not
+// necessarily mention that test's name itself.
+func (p *chattyPrinter) Printf(testName, format string, args ...interface{}) {
 	p.lastNameMu.Lock()
 	defer p.lastNameMu.Unlock()

-	if !p.chatty ||
-		strings.HasPrefix(out, "--- PASS: ") ||
-		strings.HasPrefix(out, "--- FAIL: ") ||
-		strings.HasPrefix(out, "--- SKIP: ") ||
-		strings.HasPrefix(out, "=== RUN ") ||
-		strings.HasPrefix(out, "=== CONT ") ||
-		strings.HasPrefix(out, "=== PAUSE ") {
-		// If we're buffering test output (!p.chatty), we don't really care which
-		// test is emitting which line so long as they are serialized.
-		//
-		// If the message already implies an association with a specific new test,
-		// we don't need to check what the old test name was or log an extra CONT
-		// line for it. (We're updating it anyway, and the current message already
-		// includes the test name.)
-		p.lastName = testName
-		fmt.Fprint(w, out)
-		return
-	}

 	if p.lastName == "" {
 		p.lastName = testName
 	} else if p.lastName != testName {
-		// Always printed as-is, with 0 decoration or indentation. So, we skip
-		// printing to w.
-		fmt.Printf("=== CONT %s\n", testName)
+		fmt.Fprintf(p.w, "=== CONT %s\n", testName)
 		p.lastName = testName
 	}
-	fmt.Fprint(w, out)
+
+	fmt.Fprintf(p.w, format, args...)
 }

 // The maximum number of stack frames to go through when skipping helper functions for
@@ -405,12 +394,12 @@ type common struct {
 	cleanupName string    // Name of the cleanup function.
 	cleanupPc   []uintptr // The stack trace at the point where Cleanup was called.

-	chatty     bool   // A copy of the chatty flag.
-	bench      bool   // Whether the current test is a benchmark.
-	finished   bool   // Test function has completed.
-	hasSub     int32  // Written atomically.
-	raceErrors int    // Number of races detected during test.
-	runner     string // Function name of tRunner running the test.
+	chatty     *chattyPrinter // A copy of chattyPrinter, if the chatty flag is set.
+	bench      bool           // Whether the current test is a benchmark.
+	finished   bool           // Test function has completed.
+	hasSub     int32          // Written atomically.
+	raceErrors int            // Number of races detected during test.
+	runner     string         // Function name of tRunner running the test.

 	parent *common
 	level  int // Nesting depth of test or benchmark.
@@ -572,12 +561,31 @@ func (c *common) flushToParent(testName, format string, args ...interface{}) {
 	p.mu.Lock()
 	defer p.mu.Unlock()

-	printer.Fprint(p.w, testName, fmt.Sprintf(format, args...))
-
 	c.mu.Lock()
 	defer c.mu.Unlock()
-	io.Copy(p.w, bytes.NewReader(c.output))
-	c.output = c.output[:0]
+
+	if len(c.output) > 0 {
+		format += "%s"
+		args = append(args[:len(args):len(args)], c.output)
+		c.output = c.output[:0] // but why?
+	}
+
+	if c.chatty != nil && p.w == c.chatty.w {
+		// We're flushing to the actual output, so track that this output is
+		// associated with a specific test (and, specifically, that the next output
+		// is *not* associated with that test).
+		//
+		// Moreover, if c.output is non-empty it is important that this write be
+		// atomic with respect to the output of other tests, so that we don't end up
+		// with confusing '=== CONT' lines in the middle of our '--- PASS' block.
+		// Neither humans nor cmd/test2json can parse those easily.
+		// (See https://golang.org/issue/40771.)
+		c.chatty.Updatef(testName, format, args...)
+	} else {
+		// We're flushing to the output buffer of the parent test, which will
+		// itself follow a test-name header when it is finally flushed to stdout.
+		fmt.Fprintf(p.w, format, args...)
+	}
 }

 type indenter struct {
@@ -746,13 +754,13 @@ func (c *common) logDepth(s string, depth int) {
 		}
 		panic("Log in goroutine after " + c.name + " has completed")
 	} else {
-		if c.chatty {
+		if c.chatty != nil {
 			if c.bench {
 				// Benchmarks don't print === CONT, so we should skip the test
 				// printer and just print straight to stdout.
 				fmt.Print(c.decorate(s, depth+1))
 			} else {
-				printer.Print(c.name, c.decorate(s, depth+1))
+				c.chatty.Printf(c.name, "%s", c.decorate(s, depth+1))
 			}

 			return
@@ -983,34 +991,22 @@ func (t *T) Parallel() {
 	t.parent.sub = append(t.parent.sub, t)
 	t.raceErrors += race.Errors()

-	if t.chatty {
-		// Print directly to root's io.Writer so there is no delay.
-		root := t.parent
-		for ; root.parent != nil; root = root.parent {
-		}
-		root.mu.Lock()
+	if t.chatty != nil {
 		// Unfortunately, even though PAUSE indicates that the named test is *no
 		// longer* running, cmd/test2json interprets it as changing the active test
 		// for the purpose of log parsing. We could fix cmd/test2json, but that
 		// won't fix existing deployments of third-party tools that already shell
 		// out to older builds of cmd/test2json — so merely fixing cmd/test2json
 		// isn't enough for now.
-		printer.Fprint(root.w, t.name, fmt.Sprintf("=== PAUSE %s\n", t.name))
-		root.mu.Unlock()
+		t.chatty.Updatef(t.name, "=== PAUSE %s\n", t.name)
 	}

 	t.signal <- true   // Release calling test.
 	<-t.parent.barrier // Wait for the parent test to complete.
 	t.context.waitParallel()

-	if t.chatty {
-		// Print directly to root's io.Writer so there is no delay.
-		root := t.parent
-		for ; root.parent != nil; root = root.parent {
-		}
-		root.mu.Lock()
-		printer.Fprint(root.w, t.name, fmt.Sprintf("=== CONT %s\n", t.name))
-		root.mu.Unlock()
+	if t.chatty != nil {
+		t.chatty.Updatef(t.name, "=== CONT %s\n", t.name)
 	}

 	t.start = time.Now()
@@ -1161,14 +1157,8 @@ func (t *T) Run(name string, f func(t *T)) bool {
 	}
 	t.w = indenter{&t.common}

-	if t.chatty {
-		// Print directly to root's io.Writer so there is no delay.
-		root := t.parent
-		for ; root.parent != nil; root = root.parent {
-		}
-		root.mu.Lock()
-		printer.Fprint(root.w, t.name, fmt.Sprintf("=== RUN %s\n", t.name))
-		root.mu.Unlock()
+	if t.chatty != nil {
+		t.chatty.Updatef(t.name, "=== RUN %s\n", t.name)
 	}
 	// Instead of reducing the running count of this test before calling the
 	// tRunner and increasing it afterwards, we rely on tRunner keeping the
@@ -1333,8 +1323,6 @@ func (m *M) Run() (code int) {
 		flag.Parse()
 	}

-	printer = newTestPrinter(Verbose())
-
 	if *parallel < 1 {
 		fmt.Fprintln(os.Stderr, "testing: -parallel can only be given a positive integer")
 		flag.Usage()
@@ -1379,7 +1367,7 @@ func (t *T) report() {
 	format := "--- %s: %s (%s)\n"
 	if t.Failed() {
 		t.flushToParent(t.name, format, "FAIL", t.name, dstr)
-	} else if t.chatty {
+	} else if t.chatty != nil {
 		if t.Skipped() {
 			t.flushToParent(t.name, format, "SKIP", t.name, dstr)
 		} else {
@@ -1440,10 +1428,12 @@ func runTests(matchString func(pat, str string) (bool, error), tests []InternalT
 				signal:  make(chan bool),
 				barrier: make(chan bool),
 				w:       os.Stdout,
-				chatty:  *chatty,
 			},
 			context: ctx,
 		}
+		if Verbose() {
+			t.chatty = newChattyPrinter(t.w)
+		}
 		tRunner(t, func(t *T) {
 			for _, test := range tests {
 				t.Run(test.Name, test.F)
--
cgit v1.2.3-54-g00ecf


From 0893e6a43736a946d78e50e7422b457eb9ca9984 Mon Sep 17 00:00:00 2001
From: Keith Randall
Date: Sat, 8 Aug 2020 07:58:04 -0700
Subject: [release-branch.go1.15] cmd/compile: fix live variable computation
 for deferreturn

Taking the live variable set from the last return point is
problematic. See #40629 for details, but there may not be a return
point, or it may be before the final defer.

Additionally, keeping track of the last call as a *Value doesn't quite
work. If it is dead-code eliminated, the storage for the Value is
reused for some other random instruction. Its live variable
information, if it is available at all, is wrong.

Instead, just mark all the open-defer argument slots as live
throughout the function. (They are already zero-initialized.)
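A runnable illustration of why those slots matter (a toy example, not
from the CL itself):

	package main

	import "fmt"

	// f has one open-coded defer. Its argument v sits in a fixed stack
	// slot from the defer statement onward. The dereference below can
	// panic at an arbitrary instruction, and the deferreturn path still
	// reads the slot during recovery, so the slot has to be live (and
	// zero-initialized) throughout the whole function.
	func f(p *int) (out int) {
		defer func(v int) {
			recover()
			out = v
		}(42)
		return *p // panics when p is nil; the defer still runs
	}

	func main() {
		fmt.Println(f(nil)) // prints 42
	}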
Fixes #40742

Change-Id: Ie456c7db3082d0de57eaa5234a0f32525a1cce13
Reviewed-on: https://go-review.googlesource.com/c/go/+/247522
Run-TryBot: Keith Randall
TryBot-Result: Gobot Gobot
Reviewed-by: Dan Scales
(cherry picked from commit 32a84c99e136ed5af0686dbedd31fd7dff40fb38)
Reviewed-on: https://go-review.googlesource.com/c/go/+/248621
Trust: Dmitri Shuralyov
---
 src/cmd/compile/internal/gc/plive.go | 118 ++++++++--------------------------
 src/cmd/compile/internal/gc/ssa.go   |  19 +-----
 src/cmd/compile/internal/ssa/func.go |  11 +---
 test/fixedbugs/issue40629.go         |  69 ++++++++++++++++++++
 4 files changed, 99 insertions(+), 118 deletions(-)
 create mode 100644 test/fixedbugs/issue40629.go

diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index 7e1c0c1a95..bdb458015f 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -140,24 +140,14 @@ type Liveness struct {
 	regMaps []liveRegMask

 	cache progeffectscache
-
-	// These are only populated if open-coded defers are being used.
-	// List of vars/stack slots storing defer args
-	openDeferVars []openDeferVarInfo
-	// Map from defer arg OpVarDef to the block where the OpVarDef occurs.
-	openDeferVardefToBlockMap map[*Node]*ssa.Block
-	// Map of blocks that cannot reach a return or exit (panic)
-	nonReturnBlocks map[*ssa.Block]bool
-}
-
-type openDeferVarInfo struct {
-	n         *Node // Var/stack slot storing a defer arg
-	varsIndex int   // Index of variable in lv.vars
 }

 // LivenessMap maps from *ssa.Value to LivenessIndex.
 type LivenessMap struct {
 	vals map[ssa.ID]LivenessIndex
+	// The set of live, pointer-containing variables at the deferreturn
+	// call (only set when open-coded defers are used).
+	deferreturn LivenessIndex
 }

 func (m *LivenessMap) reset() {
@@ -168,6 +158,7 @@
 			delete(m.vals, k)
 		}
 	}
+	m.deferreturn = LivenessInvalid
 }

 func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
@@ -542,7 +533,7 @@
 		if cap(lc.be) >= f.NumBlocks() {
 			lv.be = lc.be[:f.NumBlocks()]
 		}
-		lv.livenessMap = LivenessMap{lc.livenessMap.vals}
+		lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessInvalid}
 		lc.livenessMap.vals = nil
 	}
 	if lv.be == nil {
@@ -893,58 +884,12 @@ func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
 func (lv *Liveness) prologue() {
 	lv.initcache()

-	if lv.fn.Func.HasDefer() && !lv.fn.Func.OpenCodedDeferDisallowed() {
-		lv.openDeferVardefToBlockMap = make(map[*Node]*ssa.Block)
-		for i, n := range lv.vars {
-			if n.Name.OpenDeferSlot() {
-				lv.openDeferVars = append(lv.openDeferVars, openDeferVarInfo{n: n, varsIndex: i})
-			}
-		}
-
-		// Find any blocks that cannot reach a return or a BlockExit
-		// (panic) -- these must be because of an infinite loop.
-		reachesRet := make(map[ssa.ID]bool)
-		blockList := make([]*ssa.Block, 0, 256)
-
-		for _, b := range lv.f.Blocks {
-			if b.Kind == ssa.BlockRet || b.Kind == ssa.BlockRetJmp || b.Kind == ssa.BlockExit {
-				blockList = append(blockList, b)
-			}
-		}
-
-		for len(blockList) > 0 {
-			b := blockList[0]
-			blockList = blockList[1:]
-			if reachesRet[b.ID] {
-				continue
-			}
-			reachesRet[b.ID] = true
-			for _, e := range b.Preds {
-				blockList = append(blockList, e.Block())
-			}
-		}
-
-		lv.nonReturnBlocks = make(map[*ssa.Block]bool)
-		for _, b := range lv.f.Blocks {
-			if !reachesRet[b.ID] {
-				lv.nonReturnBlocks[b] = true
-				//fmt.Println("No reach ret", lv.f.Name, b.ID, b.Kind)
-			}
-		}
-	}
-
 	for _, b := range lv.f.Blocks {
 		be := lv.blockEffects(b)

 		// Walk the block instructions backward and update the block
 		// effects with the each prog effects.
 		for j := len(b.Values) - 1; j >= 0; j-- {
-			if b.Values[j].Op == ssa.OpVarDef {
-				n := b.Values[j].Aux.(*Node)
-				if n.Name.OpenDeferSlot() {
-					lv.openDeferVardefToBlockMap[n] = b
-				}
-			}
 			pos, e := lv.valueEffects(b.Values[j])
 			regUevar, regKill := lv.regEffects(b.Values[j])
 			if e&varkill != 0 {
@@ -961,20 +906,6 @@
 	}
 }

-// markDeferVarsLive marks each variable storing an open-coded defer arg as
-// specially live in block b if the variable definition dominates block b.
-func (lv *Liveness) markDeferVarsLive(b *ssa.Block, newliveout *varRegVec) {
-	// Only force computation of dominators if we have a block where we need
-	// to specially mark defer args live.
-	sdom := lv.f.Sdom()
-	for _, info := range lv.openDeferVars {
-		defB := lv.openDeferVardefToBlockMap[info.n]
-		if sdom.IsAncestorEq(defB, b) {
-			newliveout.vars.Set(int32(info.varsIndex))
-		}
-	}
-}
-
 // Solve the liveness dataflow equations.
 func (lv *Liveness) solve() {
 	// These temporary bitvectors exist to avoid successive allocations and
@@ -1018,23 +949,6 @@
 			}
 		}

-		if lv.fn.Func.HasDefer() && !lv.fn.Func.OpenCodedDeferDisallowed() &&
-			(b.Kind == ssa.BlockExit || lv.nonReturnBlocks[b]) {
-			// Open-coded defer args slots must be live
-			// everywhere in a function, since a panic can
-			// occur (almost) anywhere. Force all appropriate
-			// defer arg slots to be live in BlockExit (panic)
-			// blocks and in blocks that do not reach a return
-			// (because of infinite loop).
-			//
-			// We are assuming that the defer exit code at
-			// BlockReturn/BlockReturnJmp accesses all of the
-			// defer args (with pointers), and so keeps them
-			// live. This analysis may have to be adjusted if
-			// that changes (because of optimizations).
-			lv.markDeferVarsLive(b, &newliveout)
-		}
-
 		if !be.liveout.Eq(newliveout) {
 			change = true
 			be.liveout.Copy(newliveout)
@@ -1087,6 +1001,17 @@ func (lv *Liveness) epilogue() {
 				n.Name.SetNeedzero(true)
 				livedefer.Set(int32(i))
 			}
+			if n.Name.OpenDeferSlot() {
+				// Open-coded defer args slots must be live
+				// everywhere in a function, since a panic can
+				// occur (almost) anywhere. Because it is live
+				// everywhere, it must be zeroed on entry.
+				livedefer.Set(int32(i))
+				// It was already marked as Needzero when created.
+				if !n.Name.Needzero() {
+					Fatalf("all pointer-containing defer arg slots should have Needzero set")
+				}
+			}
 		}
 	}

@@ -1188,6 +1113,17 @@
 		lv.compact(b)
 	}

+	// If we have an open-coded deferreturn call, make a liveness map for it.
+	if lv.fn.Func.OpenCodedDeferDisallowed() {
+		lv.livenessMap.deferreturn = LivenessInvalid
+	} else {
+		lv.livenessMap.deferreturn = LivenessIndex{
+			stackMapIndex: lv.stackMapSet.add(livedefer),
+			regMapIndex:   0, // entry regMap, containing no live registers
+			isUnsafePoint: false,
+		}
+	}
+
 	// Done compacting. Throw out the stack map set.
 	lv.stackMaps = lv.stackMapSet.extractUniqe()
 	lv.stackMapSet = bvecSet{}
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index d4d23a2956..5d0098b4e6 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -4318,12 +4318,6 @@ func (s *state) openDeferExit() {
 		}
 	}

-	if i == len(s.openDefers)-1 {
-		// Record the call of the first defer. This will be used
-		// to set liveness info for the deferreturn (which is also
-		// used for any location that causes a runtime panic)
-		s.f.LastDeferExit = call
-	}
 	s.endBlock()
 	s.startBlock(bEnd)
 }
@@ -5807,11 +5801,6 @@ type SSAGenState struct {

 	// wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
 	OnWasmStackSkipped int
-
-	// Liveness index for the first function call in the final defer exit code
-	// path that we generated. All defer functions and args should be live at
-	// this point. This will be used to set the liveness for the deferreturn.
-	lastDeferLiveness LivenessIndex
 }

 // Prog appends a new Prog.
@@ -6056,12 +6045,6 @@ func genssa(f *ssa.Func, pp *Progs) {
 			// instruction.
 			s.pp.nextLive = s.livenessMap.Get(v)

-			// Remember the liveness index of the first defer call of
-			// the last defer exit
-			if v.Block.Func.LastDeferExit != nil && v == v.Block.Func.LastDeferExit {
-				s.lastDeferLiveness = s.pp.nextLive
-			}
-
 			// Special case for first line in function; move it to the start.
 			if firstPos != src.NoXPos {
 				s.SetPos(firstPos)
@@ -6122,7 +6105,7 @@
 		// When doing open-coded defers, generate a disconnected call to
 		// deferreturn and a return. This will be used during panic
 		// recovery to unwind the stack and return back to the runtime.
-		s.pp.nextLive = s.lastDeferLiveness
+		s.pp.nextLive = s.livenessMap.deferreturn
 		gencallret(pp, Deferreturn)
 	}

diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 7cf72a8e37..4b9189fb3e 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -33,15 +33,8 @@ type Func struct {
 	Blocks []*Block // unordered set of all basic blocks (note: not indexable by ID)
 	Entry  *Block   // the entry basic block

-	// If we are using open-coded defers, this is the first call to a deferred
-	// function in the final defer exit sequence that we generated. This call
-	// should be after all defer statements, and will have all args, etc. of
-	// all defer calls as live. The liveness info of this call will be used
-	// for the deferreturn/ret segment generated for functions with open-coded
-	// defers.
-	LastDeferExit *Value
-	bid           idAlloc // block ID allocator
-	vid           idAlloc // value ID allocator
+	bid idAlloc // block ID allocator
+	vid idAlloc // value ID allocator

 	// Given an environment variable used for debug hash match,
 	// what file (if any) receives the yes/no logging?
diff --git a/test/fixedbugs/issue40629.go b/test/fixedbugs/issue40629.go
new file mode 100644
index 0000000000..c6ef408f49
--- /dev/null
+++ b/test/fixedbugs/issue40629.go
@@ -0,0 +1,69 @@
+// run
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+const N = 40
+
+func main() {
+	var x [N]int // stack-allocated memory
+	for i := range x {
+		x[i] = 0x999
+	}
+
+	// This defer checks to see if x is uncorrupted.
+	defer func(p *[N]int) {
+		recover()
+		for i := range p {
+			if p[i] != 0x999 {
+				for j := range p {
+					fmt.Printf("p[%d]=0x%x\n", j, p[j])
+				}
+				panic("corrupted stack variable")
+			}
+		}
+	}(&x)
+
+	// This defer starts a new goroutine, which will (hopefully)
+	// overwrite x on the garbage stack.
+	defer func() {
+		c := make(chan bool)
+		go func() {
+			useStack(1000)
+			c <- true
+		}()
+		<-c
+
+	}()
+
+	// This defer causes a stack copy.
+	// The old stack is now garbage.
+	defer func() {
+		useStack(1000)
+	}()
+
+	// Trigger a segfault.
+	*g = 0
+
+	// Make the return statement unreachable.
+	// That makes the stack map at the deferreturn call empty.
+	// In particular, the argument to the first defer is not
+	// marked as a pointer, so it doesn't get adjusted
+	// during the stack copy.
+	for {
+	}
+}
+
+var g *int64
+
+func useStack(n int) {
+	if n == 0 {
+		return
+	}
+	useStack(n - 1)
+}
--
cgit v1.2.3-54-g00ecf


From 400c68a04d2d3a177899438948c7b712054521e6 Mon Sep 17 00:00:00 2001
From: "Bryan C. Mills"
Date: Thu, 17 Sep 2020 09:09:39 -0400
Subject: [release-branch.go1.15] cmd/addr2line: don't assume that
 GOROOT_FINAL is clean

Updates #41447
Fixes #41453

Change-Id: I4460c1c7962d02c41622a5ea1a3c4bc3714a1873
Reviewed-on: https://go-review.googlesource.com/c/go/+/255477
Trust: Bryan C. Mills
Run-TryBot: Bryan C. Mills
TryBot-Result: Go Bot
Reviewed-by: Ian Lance Taylor
(cherry picked from commit 6796a7fb127676b61375339076ae1c982a721dde)
Reviewed-on: https://go-review.googlesource.com/c/go/+/255658
---
 src/cmd/addr2line/addr2line_test.go | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/src/cmd/addr2line/addr2line_test.go b/src/cmd/addr2line/addr2line_test.go
index 578d88e432..7973aa2fe1 100644
--- a/src/cmd/addr2line/addr2line_test.go
+++ b/src/cmd/addr2line/addr2line_test.go
@@ -73,29 +73,37 @@ func testAddr2Line(t *testing.T, exepath, addr string) {
 	if err != nil {
 		t.Fatalf("Stat failed: %v", err)
 	}
+
 	// Debug paths are stored slash-separated, so convert to system-native.
 	srcPath = filepath.FromSlash(srcPath)
 	fi2, err := os.Stat(srcPath)
-	if gorootFinal := os.Getenv("GOROOT_FINAL"); gorootFinal != "" && strings.HasPrefix(srcPath, gorootFinal) {
-		if os.IsNotExist(err) || (err == nil && !os.SameFile(fi1, fi2)) {
-			// srcPath has had GOROOT_FINAL substituted for GOROOT, and it doesn't
-			// match the actual file. GOROOT probably hasn't been moved to its final
-			// location yet, so try the original location instead.
+
+	// If GOROOT_FINAL is set and srcPath is not the file we expect, perhaps
+	// srcPath has had GOROOT_FINAL substituted for GOROOT and GOROOT hasn't been
+	// moved to its final location yet. If so, try the original location instead.
+	if gorootFinal := os.Getenv("GOROOT_FINAL"); gorootFinal != "" &&
+		(os.IsNotExist(err) || (err == nil && !os.SameFile(fi1, fi2))) {
+		// srcPath is clean, but GOROOT_FINAL itself might not be.
+		// (See https://golang.org/issue/41447.)
+		gorootFinal = filepath.Clean(gorootFinal)
+
+		if strings.HasPrefix(srcPath, gorootFinal) {
 			fi2, err = os.Stat(runtime.GOROOT() + strings.TrimPrefix(srcPath, gorootFinal))
 		}
 	}
+
 	if err != nil {
 		t.Fatalf("Stat failed: %v", err)
 	}
 	if !os.SameFile(fi1, fi2) {
 		t.Fatalf("addr2line_test.go and %s are not same file", srcPath)
 	}
-	if srcLineNo != "99" {
-		t.Fatalf("line number = %v; want 99", srcLineNo)
+	if srcLineNo != "107" {
+		t.Fatalf("line number = %v; want 107", srcLineNo)
 	}
 }

-// This is line 98. The test depends on that.
+// This is line 106. The test depends on that.
 func TestAddr2Line(t *testing.T) {
 	testenv.MustHaveGoBuild(t)
--
cgit v1.2.3-54-g00ecf


From bf79f91d3dc424b2e61d5a48fc6864b8aea65ed3 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Mon, 10 Aug 2020 20:02:22 +0000
Subject: [release-branch.go1.15] runtime: disable stack shrinking in
 activeStackChans race window

Currently activeStackChans is set before a goroutine blocks on a
channel operation in an unlockf passed to gopark. The trouble is that
the unlockf is called *after* the G's status is changed, and the G's
status is what is used by a concurrent mark worker (calling suspendG)
to determine that a G has successfully been suspended. In this window
between the status change and unlockf, the mark worker could try to
shrink the G's stack, and in particular observe that activeStackChans
is false. This observation will cause the mark worker to *not*
synchronize with concurrent channel operations when it should, and so
updating pointers in the sudog for the blocked goroutine (which may
point to the goroutine's stack) races with channel operations which
may also manipulate the pointer (read it, dereference it, update it,
etc.).

Fix the problem by adding a new atomically-updated flag to the g
struct called parkingOnChan, which is non-zero in the race window
above. Then, in isShrinkStackSafe, check if parkingOnChan is zero. The
race is resolved like so:

* Blocking G sets parkingOnChan, then changes status in gopark.
* Mark worker successfully suspends blocking G.
* If the mark worker observes parkingOnChan is non-zero when checking
  isShrinkStackSafe, then it's not safe to shrink (we're in the race
  window).
* If the mark worker observes parkingOnChan as zero, then because the
  mark worker observed the G status change, it can be sure that
  gopark's unlockf completed, and gp.activeStackChans will be correct.

The risk of this change is low, since although it reduces the number
of places that stack shrinking is allowed, the window here is
incredibly small. Essentially, every place that it might crash now is
replaced with no shrink.

This change adds a test, but the race window is so small that it's
hard to trigger without a well-placed sleep in park_m. Also, this
change fixes stackGrowthRecursive in proc_test.go to actually allocate
a 128-byte stack frame. It turns out the compiler was destructuring
the "pad" field and only allocating one uint64 on the stack.

For #40641.
Fixes #40643.
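The protocol can be modeled in miniature (stand-in types, not the real
runtime code):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	type g struct {
		activeStackChans bool   // only trustworthy once parkingOnChan == 0
		parkingOnChan    uint32 // models the new atomically-updated flag
	}

	// park models gopark for a channel op: raise the flag before the
	// status change, publish activeStackChans, then lower the flag.
	func (gp *g) park() {
		atomic.StoreUint32(&gp.parkingOnChan, 1)
		// ... G status changes to waiting here in the real runtime ...
		gp.activeStackChans = true
		atomic.StoreUint32(&gp.parkingOnChan, 0)
	}

	// isShrinkStackSafe models the new check: while the flag is up,
	// activeStackChans may be stale, so shrinking must be skipped.
	func (gp *g) isShrinkStackSafe() bool {
		return atomic.LoadUint32(&gp.parkingOnChan) == 0
	}

	func main() {
		gp := new(g)
		gp.park()
		fmt.Println(gp.isShrinkStackSafe()) // true once the window closes
	}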
Change-Id: I7dfbe7d460f6972b8956116b137bc13bc24464e8
Reviewed-on: https://go-review.googlesource.com/c/go/+/247050
Run-TryBot: Michael Knyszek
TryBot-Result: Go Bot
Reviewed-by: Michael Pratt
Trust: Michael Knyszek
(cherry picked from commit eb3c6a93c3236bbde5dee6cc5bd4ca9f8ab1647a)
Reviewed-on: https://go-review.googlesource.com/c/go/+/256300
Reviewed-by: Austin Clements
---
 src/runtime/chan.go      | 22 +++++++++++++++++++
 src/runtime/chan_test.go | 56 ++++++++++++++++++++++++++++++++++++++++++++++++
 src/runtime/proc_test.go | 10 ++++++++-
 src/runtime/runtime2.go  |  4 ++++
 src/runtime/select.go    | 19 ++++++++++++++++
 src/runtime/stack.go     | 13 ++++++++++-
 6 files changed, 122 insertions(+), 2 deletions(-)

diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index f6f4ffd02e..d5daa4b13d 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -250,6 +250,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
 	gp.waiting = mysg
 	gp.param = nil
 	c.sendq.enqueue(mysg)
+	// Signal to anyone trying to shrink our stack that we're about
+	// to park on a channel. The window between when this G's status
+	// changes and when we set gp.activeStackChans is not safe for
+	// stack shrinking.
+	atomic.Store8(&gp.parkingOnChan, 1)
 	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
 	// Ensure the value being sent is kept alive until the
 	// receiver copies it out. The sudog has a pointer to the
@@ -564,6 +569,11 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
 	mysg.c = c
 	gp.param = nil
 	c.recvq.enqueue(mysg)
+	// Signal to anyone trying to shrink our stack that we're about
+	// to park on a channel. The window between when this G's status
+	// changes and when we set gp.activeStackChans is not safe for
+	// stack shrinking.
+	atomic.Store8(&gp.parkingOnChan, 1)
 	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)

 	// someone woke us up
@@ -641,7 +651,19 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
 func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
 	// There are unlocked sudogs that point into gp's stack. Stack
 	// copying must lock the channels of those sudogs.
+	// Set activeStackChans here instead of before we try parking
+	// because we could self-deadlock in stack growth on the
+	// channel lock.
 	gp.activeStackChans = true
+	// Mark that it's safe for stack shrinking to occur now,
+	// because any thread acquiring this G's stack for shrinking
+	// is guaranteed to observe activeStackChans after this store.
+	atomic.Store8(&gp.parkingOnChan, 0)
+	// Make sure we unlock after setting activeStackChans and
+	// unsetting parkingOnChan. The moment we unlock chanLock
+	// we risk gp getting readied by a channel operation and
+	// so gp could continue running before everything before
+	// the unlock is visible (even to gp itself).
 	unlock((*mutex)(chanLock))
 	return true
 }
diff --git a/src/runtime/chan_test.go b/src/runtime/chan_test.go
index 039a086e9b..756bbbeccf 100644
--- a/src/runtime/chan_test.go
+++ b/src/runtime/chan_test.go
@@ -623,6 +623,62 @@ func TestShrinkStackDuringBlockedSend(t *testing.T) {
 	<-done
 }

+func TestNoShrinkStackWhileParking(t *testing.T) {
+	// The goal of this test is to trigger a "racy sudog adjustment"
+	// throw. Basically, there's a window between when a goroutine
+	// becomes available for preemption for stack scanning (and thus,
+	// stack shrinking) but before the goroutine has fully parked on a
+	// channel. See issue 40641 for more details on the problem.
+	//
+	// The way we try to induce this failure is to set up two
+	// goroutines: a sender and a receiver that communicate across
+	// a channel. We try to set up a situation where the sender
+	// grows its stack temporarily then *fully* blocks on a channel
+	// often. Meanwhile a GC is triggered so that we try to get a
+	// mark worker to shrink the sender's stack and race with the
+	// sender parking.
+	//
+	// Unfortunately the race window here is so small that we
+	// either need a ridiculous number of iterations, or we add
+	// "usleep(1000)" to park_m, just before the unlockf call.
+	const n = 10
+	send := func(c chan<- int, done chan struct{}) {
+		for i := 0; i < n; i++ {
+			c <- i
+			// Use lots of stack briefly so that
+			// the GC is going to want to shrink us
+			// when it scans us. Make sure not to
+			// do any function calls otherwise
+			// in order to avoid us shrinking ourselves
+			// when we're preempted.
+			stackGrowthRecursive(20)
+		}
+		done <- struct{}{}
+	}
+	recv := func(c <-chan int, done chan struct{}) {
+		for i := 0; i < n; i++ {
+			// Sleep here so that the sender always
+			// fully blocks.
+			time.Sleep(10 * time.Microsecond)
+			<-c
+		}
+		done <- struct{}{}
+	}
+	for i := 0; i < n*20; i++ {
+		c := make(chan int)
+		done := make(chan struct{})
+		go recv(c, done)
+		go send(c, done)
+		// Wait a little bit before triggering
+		// the GC to make sure the sender and
+		// receiver have gotten into their groove.
+		time.Sleep(50 * time.Microsecond)
+		runtime.GC()
+		<-done
+		<-done
+	}
+}
+
 func TestSelectDuplicateChannel(t *testing.T) {
 	// This test makes sure we can queue a G on
 	// the same channel multiple times.
diff --git a/src/runtime/proc_test.go b/src/runtime/proc_test.go
index de4dec36ce..767bde15b4 100644
--- a/src/runtime/proc_test.go
+++ b/src/runtime/proc_test.go
@@ -523,9 +523,17 @@ func BenchmarkPingPongHog(b *testing.B) {
 	<-done
 }

+var padData [128]uint64
+
 func stackGrowthRecursive(i int) {
 	var pad [128]uint64
-	if i != 0 && pad[0] == 0 {
+	pad = padData
+	for j := range pad {
+		if pad[j] != 0 {
+			return
+		}
+	}
+	if i != 0 {
 		stackGrowthRecursive(i - 1)
 	}
 }
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index cffdb0bf27..0d0cac240b 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -447,6 +447,10 @@ type g struct {
 	// copying needs to acquire channel locks to protect these
 	// areas of the stack.
 	activeStackChans bool
+	// parkingOnChan indicates that the goroutine is about to
+	// park on a chansend or chanrecv. Used to signal an unsafe point
+	// for stack shrinking. It's a boolean value, but is updated atomically.
+	parkingOnChan uint8

 	raceignore     int8 // ignore race detection events
 	sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
diff --git a/src/runtime/select.go b/src/runtime/select.go
index a069e3e050..69d255712a 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -7,6 +7,7 @@ package runtime
 // This file contains the implementation of Go select statements.

 import (
+	"runtime/internal/atomic"
 	"unsafe"
 )
@@ -77,7 +78,20 @@ func selunlock(scases []scase, lockorder []uint16) {
 func selparkcommit(gp *g, _ unsafe.Pointer) bool {
 	// There are unlocked sudogs that point into gp's stack. Stack
 	// copying must lock the channels of those sudogs.
+	// Set activeStackChans here instead of before we try parking
+	// because we could self-deadlock in stack growth on a
+	// channel lock.
 	gp.activeStackChans = true
+	// Mark that it's safe for stack shrinking to occur now,
+	// because any thread acquiring this G's stack for shrinking
+	// is guaranteed to observe activeStackChans after this store.
+	atomic.Store8(&gp.parkingOnChan, 0)
+	// Make sure we unlock after setting activeStackChans and
+	// unsetting parkingOnChan. The moment we unlock any of the
+	// channel locks we risk gp getting readied by a channel operation
+	// and so gp could continue running before everything before the
+	// unlock is visible (even to gp itself).
+
 	// This must not access gp's stack (see gopark). In
 	// particular, it must not access the *hselect. That's okay,
 	// because by the time this is called, gp.waiting has all
@@ -316,6 +330,11 @@ loop:

 	// wait for someone to wake us up
 	gp.param = nil
+	// Signal to anyone trying to shrink our stack that we're about
+	// to park on a channel. The window between when this G's status
+	// changes and when we set gp.activeStackChans is not safe for
+	// stack shrinking.
+	atomic.Store8(&gp.parkingOnChan, 1)
 	gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
 	gp.activeStackChans = false
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 0e930f60db..f38ba56c80 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -864,6 +864,13 @@ func copystack(gp *g, newsize uintptr) {
 	// Adjust sudogs, synchronizing with channel ops if necessary.
 	ncopy := used
 	if !gp.activeStackChans {
+		if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
+			// It's not safe for someone to shrink this stack while we're actively
+			// parking on a channel, but it is safe to grow since we do that
+			// ourselves and explicitly don't want to synchronize with channels
+			// since we could self-deadlock.
+			throw("racy sudog adjustment due to parking on channel")
+		}
 		adjustsudogs(gp, &adjinfo)
 	} else {
 		// sudogs may be pointing in to the stack and gp has
@@ -1103,7 +1110,11 @@ func isShrinkStackSafe(gp *g) bool {
 	// We also can't copy the stack if we're at an asynchronous
 	// safe-point because we don't have precise pointer maps for
 	// all frames.
-	return gp.syscallsp == 0 && !gp.asyncSafePoint
+	//
+	// We also can't *shrink* the stack in the window between the
+	// goroutine calling gopark to park on a channel and
+	// gp.activeStackChans being set.
+	return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
 }

 // Maybe shrink the stack being used by gp.
--
cgit v1.2.3-54-g00ecf


From 884179022e0f57d6cf26a5fb12695a33dc1780fc Mon Sep 17 00:00:00 2001
From: Daniel Martí
Date: Thu, 10 Sep 2020 22:53:59 +0100
Subject: [release-branch.go1.15] cmd/go: relax version's error on unexpected
 flags
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In https://golang.org/cl/221397 we made commands like "go version -v"
error, since both of the command's flags only make sense when
arguments follow them. Without arguments, the command only reports
Go's own version, and the flags are most likely a mistake.

However, the script below is entirely reasonable:

	export GOFLAGS=-v # make all Go commands verbose
	go version
	go build

After the previous CL, "go version" would error. Instead, only error
if the flag was passed explicitly, and not via GOFLAGS.
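The distinction can be sketched as follows (a simplified standalone
version; the real helper, InGOFLAGS, is added to cmd/go/internal/base
in the diff below and also consults the go environment):

	package main

	import (
		"fmt"
		"os"
		"strings"
	)

	// inGOFLAGS reports whether the named flag appears in $GOFLAGS, so
	// a command can tell an explicit flag apart from an inherited one.
	// (Simplified: cmd/go's real GOFLAGS parsing also validates entries.)
	func inGOFLAGS(flag string) bool {
		for _, goflag := range strings.Fields(os.Getenv("GOFLAGS")) {
			name := goflag
			if strings.HasPrefix(name, "--") {
				name = name[1:]
			}
			if i := strings.Index(name, "="); i >= 0 {
				name = name[:i]
			}
			if name == flag {
				return true
			}
		}
		return false
	}

	func main() {
		os.Setenv("GOFLAGS", "-m -v")
		fmt.Println(inGOFLAGS("-v"), inGOFLAGS("-mod")) // true false
	}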
The patch does mean that we won't error on "GOFLAGS=-v go version -v",
but that very unlikely false negative is okay. The error is only meant
to help the user not misuse the flags, anyway - it's not a critical
error of any sort.

To reuse inGOFLAGS, we move it to the base package and export it
there, since it's where the rest of the GOFLAGS funcs are.

Fixes #41464.

Change-Id: I74003dd25d94bacf9ac507b5cad778fd65233321
Reviewed-on: https://go-review.googlesource.com/c/go/+/254157
Trust: Daniel Martí
Run-TryBot: Daniel Martí
TryBot-Result: Go Bot
Reviewed-by: Bryan C. Mills
(cherry picked from commit de0957dc081e1ec49c99a0f37403ceadbaaedf85)
Reviewed-on: https://go-review.googlesource.com/c/go/+/255498
Trust: Bryan C. Mills
Run-TryBot: Bryan C. Mills
---
 src/cmd/go/internal/base/goflags.go    | 17 +++++++++++++++
 src/cmd/go/internal/version/version.go |  9 ++++++++-
 src/cmd/go/internal/work/init.go       | 22 +++------------------
 src/cmd/go/testdata/script/version.txt |  6 ++++++
 4 files changed, 34 insertions(+), 20 deletions(-)

diff --git a/src/cmd/go/internal/base/goflags.go b/src/cmd/go/internal/base/goflags.go
index 34766134b0..f29cc3cf9a 100644
--- a/src/cmd/go/internal/base/goflags.go
+++ b/src/cmd/go/internal/base/goflags.go
@@ -153,3 +153,20 @@ func SetFromGOFLAGS(flags *flag.FlagSet) {
 		}
 	}
 }
+
+// InGOFLAGS returns whether GOFLAGS contains the given flag, such as "-mod".
+func InGOFLAGS(flag string) bool {
+	for _, goflag := range GOFLAGS() {
+		name := goflag
+		if strings.HasPrefix(name, "--") {
+			name = name[1:]
+		}
+		if i := strings.Index(name, "="); i >= 0 {
+			name = name[:i]
+		}
+		if name == flag {
+			return true
+		}
+	}
+	return false
+}
diff --git a/src/cmd/go/internal/version/version.go b/src/cmd/go/internal/version/version.go
index ac2ae50155..a5b13653cd 100644
--- a/src/cmd/go/internal/version/version.go
+++ b/src/cmd/go/internal/version/version.go
@@ -53,7 +53,14 @@ var (

 func runVersion(cmd *base.Command, args []string) {
 	if len(args) == 0 {
-		if *versionM || *versionV {
+		// If any of this command's flags were passed explicitly, error
+		// out, because they only make sense with arguments.
+		//
+		// Don't error if the flags came from GOFLAGS, since that can be
+		// a reasonable use case. For example, imagine GOFLAGS=-v to
+		// turn "verbose mode" on for all Go commands, which should not
+		// break "go version".
+		if (!base.InGOFLAGS("-m") && *versionM) || (!base.InGOFLAGS("-v") && *versionV) {
 			fmt.Fprintf(os.Stderr, "go version: flags can only be used with arguments\n")
 			base.SetExitStatus(2)
 			return
diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go
index dad3b10111..c168364cd1 100644
--- a/src/cmd/go/internal/work/init.go
+++ b/src/cmd/go/internal/work/init.go
@@ -254,34 +254,18 @@ func buildModeInit() {
 	case "":
 		// ok
 	case "readonly", "vendor", "mod":
-		if !cfg.ModulesEnabled && !inGOFLAGS("-mod") {
+		if !cfg.ModulesEnabled && !base.InGOFLAGS("-mod") {
 			base.Fatalf("build flag -mod=%s only valid when using modules", cfg.BuildMod)
 		}
 	default:
 		base.Fatalf("-mod=%s not supported (can be '', 'mod', 'readonly', or 'vendor')", cfg.BuildMod)
 	}
 	if !cfg.ModulesEnabled {
-		if cfg.ModCacheRW && !inGOFLAGS("-modcacherw") {
+		if cfg.ModCacheRW && !base.InGOFLAGS("-modcacherw") {
 			base.Fatalf("build flag -modcacherw only valid when using modules")
 		}
-		if cfg.ModFile != "" && !inGOFLAGS("-mod") {
+		if cfg.ModFile != "" && !base.InGOFLAGS("-mod") {
 			base.Fatalf("build flag -modfile only valid when using modules")
 		}
 	}
 }
-
-func inGOFLAGS(flag string) bool {
-	for _, goflag := range base.GOFLAGS() {
-		name := goflag
-		if strings.HasPrefix(name, "--") {
-			name = name[1:]
-		}
-		if i := strings.Index(name, "="); i >= 0 {
-			name = name[:i]
-		}
-		if name == flag {
-			return true
-		}
-	}
-	return false
-}
diff --git a/src/cmd/go/testdata/script/version.txt b/src/cmd/go/testdata/script/version.txt
index 0123ac6d53..87cb6befe9 100644
--- a/src/cmd/go/testdata/script/version.txt
+++ b/src/cmd/go/testdata/script/version.txt
@@ -9,6 +9,12 @@ stderr 'with arguments'
 ! go version -v
 stderr 'with arguments'

+# Neither of the two flags above should be an issue via GOFLAGS.
+env GOFLAGS='-m -v'
+go version
+stdout '^go version'
+env GOFLAGS=
+
 env GO111MODULE=on
 # Skip the builds below if we are running in short mode.
 [short] skip
--
cgit v1.2.3-54-g00ecf


From af06e65910fc976c63a65decd2881ba34114428f Mon Sep 17 00:00:00 2001
From: Michael Munday
Date: Wed, 23 Sep 2020 03:58:52 -0700
Subject: [release-branch.go1.15] bytes, internal/bytealg: fix incorrect
 IndexString usage

The IndexString implementation in the bytealg package requires that
the string passed into it be in the range '2 <= len(s) <= MaxLen'
where MaxLen may be any value (including 0). CL 156998 added calls to
bytealg.IndexString where MaxLen was not first checked. This led to an
illegal instruction on s390x with the vector facility disabled.

This CL guards the calls to bytealg.IndexString with a MaxLen check.
If the check fails then the code now falls back to the pre CL 156998
implementation (a loop over the runes in the string).

Since the MaxLen check is now in place the generic implementation is
no longer called so I have returned it to its original unimplemented
state.

In future we may want to drop MaxLen to prevent this kind of
confusion.

Fixes #41595.
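The contract and its guard can be illustrated standalone (stand-in
names; the real guards appear in bytes.IndexAny and bytes.LastIndexAny
below):

	package main

	import "fmt"

	// maxLen and fastIndex stand in for bytealg.MaxLen and the
	// assembly IndexString. maxLen may be 0 (for example, s390x
	// without the vector facility), in which case the fast path must
	// never be entered.
	var maxLen = 0

	func fastIndex(s, substr string) int {
		panic("contract: requires 2 <= len(substr) <= maxLen")
	}

	func index(s, substr string) int {
		if len(substr) >= 2 && len(substr) <= maxLen {
			return fastIndex(s, substr) // accelerated path
		}
		// Portable fallback, valid for any length.
		for i := 0; i+len(substr) <= len(s); i++ {
			if s[i:i+len(substr)] == substr {
				return i
			}
		}
		return -1
	}

	func main() {
		fmt.Println(index("hello, 世界", "世界")) // 7, via the fallback
	}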
Change-Id: I81d88cf8c5ae143a8f5f460d18f8269cb6c0f28c Reviewed-on: https://go-review.googlesource.com/c/go/+/256921 Trust: Michael Munday Run-TryBot: Michael Munday Reviewed-by: Keith Randall TryBot-Result: Go Bot --- src/bytes/bytes.go | 50 ++++++++++++++++++++++------------- src/internal/bytealg/index_generic.go | 38 ++------------------------ 2 files changed, 34 insertions(+), 54 deletions(-) diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go index aa07b9fbc1..ce52649f13 100644 --- a/src/bytes/bytes.go +++ b/src/bytes/bytes.go @@ -227,19 +227,26 @@ func IndexAny(s []byte, chars string) int { continue } r, width = utf8.DecodeRune(s[i:]) - if r == utf8.RuneError { - for _, r = range chars { - if r == utf8.RuneError { + if r != utf8.RuneError { + // r is 2 to 4 bytes + if len(chars) == width { + if chars == string(r) { return i } + continue + } + // Use bytealg.IndexString for performance if available. + if bytealg.MaxLen >= width { + if bytealg.IndexString(chars, string(r)) >= 0 { + return i + } + continue } - continue } - // r is 2 to 4 bytes. Using strings.Index is more reasonable, but as the bytes - // package should not import the strings package, use bytealg.IndexString - // instead. And this does not seem to lose much performance. - if chars == string(r) || bytealg.IndexString(chars, string(r)) >= 0 { - return i + for _, ch := range chars { + if r == ch { + return i + } } } return -1 @@ -304,19 +311,26 @@ func LastIndexAny(s []byte, chars string) int { } r, size := utf8.DecodeLastRune(s[:i]) i -= size - if r == utf8.RuneError { - for _, r = range chars { - if r == utf8.RuneError { + if r != utf8.RuneError { + // r is 2 to 4 bytes + if len(chars) == size { + if chars == string(r) { return i } + continue + } + // Use bytealg.IndexString for performance if available. + if bytealg.MaxLen >= size { + if bytealg.IndexString(chars, string(r)) >= 0 { + return i + } + continue } - continue } - // r is 2 to 4 bytes. Using strings.Index is more reasonable, but as the bytes - // package should not import the strings package, use bytealg.IndexString - // instead. And this does not seem to lose much performance. - if chars == string(r) || bytealg.IndexString(chars, string(r)) >= 0 { - return i + for _, ch := range chars { + if r == ch { + return i + } } } return -1 diff --git a/src/internal/bytealg/index_generic.go b/src/internal/bytealg/index_generic.go index 83345f1013..98e859f925 100644 --- a/src/internal/bytealg/index_generic.go +++ b/src/internal/bytealg/index_generic.go @@ -16,42 +16,8 @@ func Index(a, b []byte) int { // IndexString returns the index of the first instance of b in a, or -1 if b is not present in a. // Requires 2 <= len(b) <= MaxLen. -func IndexString(s, substr string) int { - // This is a partial copy of strings.Index, here because bytes.IndexAny and bytes.LastIndexAny - // call bytealg.IndexString. Some platforms have an optimized assembly version of this function. - // This implementation is used for those that do not. Although the pure Go implementation here - // works for the case of len(b) > MaxLen, we do not require that its assembly implementation also - // supports the case of len(b) > MaxLen. And we do not guarantee that this function supports the - // case of len(b) > MaxLen. 
- n := len(substr) - c0 := substr[0] - c1 := substr[1] - i := 0 - t := len(s) - n + 1 - fails := 0 - for i < t { - if s[i] != c0 { - o := IndexByteString(s[i:t], c0) - if o < 0 { - return -1 - } - i += o - } - if s[i+1] == c1 && s[i:i+n] == substr { - return i - } - i++ - fails++ - if fails >= 4+i>>4 && i < t { - // See comment in src/bytes/bytes.go. - j := IndexRabinKarp(s[i:], substr) - if j < 0 { - return -1 - } - return i + j - } - } - return -1 +func IndexString(a, b string) int { + panic("unimplemented") } // Cutover reports the number of failures of IndexByte we should tolerate -- cgit v1.2.3-54-g00ecf From 2c2e11f34595669c118fcfd3fe9de562e00b71df Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 7 Oct 2020 16:37:05 -0700 Subject: [release-branch.go1.15] cmd/cgo: add more architectures to size maps This brings over the architectures that the gofrontend knows about. This permits using the main cgo tool for those architectures, as cgo can be used with -godefs without gc support. This will help add golang.org/x/sys/unix support for other architectures. For #37443 Fixes #41871 Change-Id: I63632b9c5139e71b9ccab8edcc7acdb464229b74 Reviewed-on: https://go-review.googlesource.com/c/go/+/260657 Trust: Ian Lance Taylor Run-TryBot: Ian Lance Taylor TryBot-Result: Go Bot Reviewed-by: Than McIntosh (cherry picked from commit 5d1378143bc07791296abb420df35537ad80492f) Reviewed-on: https://go-review.googlesource.com/c/go/+/260702 --- src/cmd/cgo/main.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go index 5a7bb3f87b..890daf65c4 100644 --- a/src/cmd/cgo/main.go +++ b/src/cmd/cgo/main.go @@ -169,35 +169,51 @@ func usage() { var ptrSizeMap = map[string]int64{ "386": 4, + "alpha": 8, "amd64": 8, "arm": 4, "arm64": 8, + "m68k": 4, "mips": 4, "mipsle": 4, "mips64": 8, "mips64le": 8, + "nios2": 4, + "ppc": 4, "ppc64": 8, "ppc64le": 8, + "riscv": 4, "riscv64": 8, "s390": 4, "s390x": 8, + "sh": 4, + "shbe": 4, + "sparc": 4, "sparc64": 8, } var intSizeMap = map[string]int64{ "386": 4, + "alpha": 8, "amd64": 8, "arm": 4, "arm64": 8, + "m68k": 4, "mips": 4, "mipsle": 4, "mips64": 8, "mips64le": 8, + "nios2": 4, + "ppc": 4, "ppc64": 8, "ppc64le": 8, + "riscv": 4, "riscv64": 8, "s390": 4, "s390x": 8, + "sh": 4, + "shbe": 4, + "sparc": 4, "sparc64": 8, } -- cgit v1.2.3-54-g00ecf From 0b80775b8fb2f08d0cb9a673f13eb30ec17372d3 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sun, 23 Aug 2020 11:52:53 -0700 Subject: [release-branch.go1.15] runtime: implement StorepNoWB for wasm in assembly The second argument of StorepNoWB must be forced to escape. The current Go code does not explicitly enforce that property. By implementing in assembly, and not using go:noescape, we force the issue. Test is in CL 249761. Issue #40975. This CL is needed for CL 249917, which changes how go:notinheap works and breaks the previous StorepNoWB wasm code. I checked for other possible errors like this. This is the only go:notinheap that isn't in the runtime itself. Included test from CL 249761. 
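A sketch of why the val argument must escape: the pointer written
through StorepNoWB outlives the caller's frame, so a stack-allocated
pointee would dangle. storepStandIn below is an ordinary Go stand-in,
not the runtime's assembly implementation:

package main

import (
	"fmt"
	"unsafe"
)

var slot unsafe.Pointer // package-level slot: outlives any stack frame

// storepStandIn models the shape of StorepNoWB. The real function has
// no Go body and no go:noescape annotation, so the compiler must assume
// the callee keeps val alive and force it to escape.
func storepStandIn(ptr *unsafe.Pointer, val unsafe.Pointer) {
	*ptr = val
}

func main() {
	x := new(int) // must be heap-allocated: its address is published in slot
	*x = 42
	storepStandIn(&slot, unsafe.Pointer(x))
	fmt.Println(*(*int)(slot)) // 42
}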
Update #41432 Change-Id: I43400a806662655727c4a3baa8902b63bdc9fa57 Reviewed-on: https://go-review.googlesource.com/c/go/+/249962 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky (cherry picked from commit c0602603b20186228b4f89f265cb3f7665e06768) Reviewed-on: https://go-review.googlesource.com/c/go/+/260878 Trust: Keith Randall Trust: Cuong Manh Le Run-TryBot: Dmitri Shuralyov TryBot-Result: Go Bot Reviewed-by: Cuong Manh Le --- src/runtime/internal/atomic/asm_wasm.s | 10 ++++++++++ src/runtime/internal/atomic/atomic_test.go | 10 ++++++++++ src/runtime/internal/atomic/atomic_wasm.go | 13 +++++-------- 3 files changed, 25 insertions(+), 8 deletions(-) create mode 100644 src/runtime/internal/atomic/asm_wasm.s diff --git a/src/runtime/internal/atomic/asm_wasm.s b/src/runtime/internal/atomic/asm_wasm.s new file mode 100644 index 0000000000..7c33cb1ee9 --- /dev/null +++ b/src/runtime/internal/atomic/asm_wasm.s @@ -0,0 +1,10 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16 + MOVD ptr+0(FP), R0 + MOVD val+8(FP), 0(R0) + RET diff --git a/src/runtime/internal/atomic/atomic_test.go b/src/runtime/internal/atomic/atomic_test.go index 0c1125c558..b0a8fa0610 100644 --- a/src/runtime/internal/atomic/atomic_test.go +++ b/src/runtime/internal/atomic/atomic_test.go @@ -220,3 +220,13 @@ func TestBitwiseContended(t *testing.T) { } } } + +func TestStorepNoWB(t *testing.T) { + var p [2]*int + for i := range p { + atomic.StorepNoWB(unsafe.Pointer(&p[i]), unsafe.Pointer(new(int))) + } + if p[0] == p[1] { + t.Error("Bad escape analysis of StorepNoWB") + } +} diff --git a/src/runtime/internal/atomic/atomic_wasm.go b/src/runtime/internal/atomic/atomic_wasm.go index 9037c2f7c8..2c0c3a8174 100644 --- a/src/runtime/internal/atomic/atomic_wasm.go +++ b/src/runtime/internal/atomic/atomic_wasm.go @@ -153,14 +153,11 @@ func Store64(ptr *uint64, val uint64) { *ptr = val } -//go:notinheap -type noWB struct{} - -//go:noinline -//go:nosplit -func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) { - *(**noWB)(ptr) = (*noWB)(val) -} +// StorepNoWB performs *ptr = val atomically and without a write +// barrier. +// +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) //go:nosplit //go:noinline -- cgit v1.2.3-54-g00ecf From a460a2beee167fc5e7ada6d4effbbf2a52969bfb Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sat, 22 Aug 2020 14:07:30 -0700 Subject: [release-branch.go1.15] cmd/compile: make Haspointers a method instead of a function More ergonomic that way. Also change Haspointers to HasPointers while we are here. 
Change-Id: I45bedc294c1a8c2bd01dc14bd04615ae77555375 Reviewed-on: https://go-review.googlesource.com/c/go/+/249959 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Emmanuel Odeke Reviewed-by: Matthew Dempsky Reviewed-on: https://go-review.googlesource.com/c/go/+/255318 Trust: Keith Randall Reviewed-by: Austin Clements --- src/cmd/compile/internal/gc/esc.go | 4 ++-- src/cmd/compile/internal/gc/escape.go | 12 ++++++------ src/cmd/compile/internal/gc/gsubr.go | 3 +-- src/cmd/compile/internal/gc/order.go | 6 +++--- src/cmd/compile/internal/gc/pgen.go | 6 +++--- src/cmd/compile/internal/gc/pgen_test.go | 4 ++-- src/cmd/compile/internal/gc/plive.go | 2 +- src/cmd/compile/internal/gc/range.go | 2 +- src/cmd/compile/internal/gc/reflect.go | 10 +++++----- src/cmd/compile/internal/gc/ssa.go | 12 ++++++------ src/cmd/compile/internal/gc/walk.go | 14 +++++++------- src/cmd/compile/internal/types/type.go | 16 ++++++++-------- 12 files changed, 45 insertions(+), 46 deletions(-) diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index f3e9ab78ef..37b0ef8c52 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -377,7 +377,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { return unsafeUintptrTag } - if !types.Haspointers(f.Type) { // don't bother tagging for scalars + if !f.Type.HasPointers() { // don't bother tagging for scalars return "" } @@ -415,7 +415,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string { } } - if !types.Haspointers(f.Type) { // don't bother tagging for scalars + if !f.Type.HasPointers() { // don't bother tagging for scalars return "" } diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index 5dc755186e..ddf89f6159 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -326,7 +326,7 @@ func (e *Escape) stmt(n *Node) { if typesw && n.Left.Left != nil { cv := cas.Rlist.First() k := e.dcl(cv) // type switch variables have no ODCL. - if types.Haspointers(cv.Type) { + if cv.Type.HasPointers() { ks = append(ks, k.dotType(cv.Type, cas, "switch case")) } } @@ -433,7 +433,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) { if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() { // nop - } else if k.derefs >= 0 && !types.Haspointers(n.Type) { + } else if k.derefs >= 0 && !n.Type.HasPointers() { k = e.discardHole() } @@ -698,7 +698,7 @@ func (e *Escape) addr(n *Node) EscHole { e.assignHeap(n.Right, "key of map put", n) } - if !types.Haspointers(n.Type) { + if !n.Type.HasPointers() { k = e.discardHole() } @@ -811,14 +811,14 @@ func (e *Escape) call(ks []EscHole, call, where *Node) { // slice might be allocated, and all slice elements // might flow to heap. 
appendeeK := ks[0] - if types.Haspointers(args[0].Type.Elem()) { + if args[0].Type.Elem().HasPointers() { appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice")) } argument(appendeeK, args[0]) if call.IsDDD() { appendedK := e.discardHole() - if args[1].Type.IsSlice() && types.Haspointers(args[1].Type.Elem()) { + if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() { appendedK = e.heapHole().deref(call, "appended slice...") } argument(appendedK, args[1]) @@ -832,7 +832,7 @@ func (e *Escape) call(ks []EscHole, call, where *Node) { argument(e.discardHole(), call.Left) copiedK := e.discardHole() - if call.Right.Type.IsSlice() && types.Haspointers(call.Right.Type.Elem()) { + if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() { copiedK = e.heapHole().deref(call, "copied slice") } argument(copiedK, call.Right) diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 336e870bbd..15a84a8a43 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -32,7 +32,6 @@ package gc import ( "cmd/compile/internal/ssa" - "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" @@ -316,7 +315,7 @@ func ggloblnod(nam *Node) { if nam.Name.Readonly() { flags = obj.RODATA } - if nam.Type != nil && !types.Haspointers(nam.Type) { + if nam.Type != nil && !nam.Type.HasPointers() { flags |= obj.NOPTR } Ctxt.Globl(s, nam.Type.Width, flags) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 6b6107290a..6f7ef01bcf 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -928,7 +928,7 @@ func (o *Order) stmt(n *Node) { n2.Ninit.Append(tmp2) } - r.Left = o.newTemp(r.Right.Left.Type.Elem(), types.Haspointers(r.Right.Left.Type.Elem())) + r.Left = o.newTemp(r.Right.Left.Type.Elem(), r.Right.Left.Type.Elem().HasPointers()) tmp2 := nod(OAS, tmp1, r.Left) tmp2 = typecheck(tmp2, ctxStmt) n2.Ninit.Append(tmp2) @@ -1407,7 +1407,7 @@ func (o *Order) as2(n *Node) { left := []*Node{} for ni, l := range n.List.Slice() { if !l.isBlank() { - tmp := o.newTemp(l.Type, types.Haspointers(l.Type)) + tmp := o.newTemp(l.Type, l.Type.HasPointers()) n.List.SetIndex(ni, tmp) tmplist = append(tmplist, tmp) left = append(left, l) @@ -1429,7 +1429,7 @@ func (o *Order) okAs2(n *Node) { var tmp1, tmp2 *Node if !n.List.First().isBlank() { typ := n.Right.Type - tmp1 = o.newTemp(typ, types.Haspointers(typ)) + tmp1 = o.newTemp(typ, typ.HasPointers()) } if !n.List.Second().isBlank() { diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 74654c86bc..662fbbda5f 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -80,8 +80,8 @@ func cmpstackvarlt(a, b *Node) bool { return a.Name.Used() } - ap := types.Haspointers(a.Type) - bp := types.Haspointers(b.Type) + ap := a.Type.HasPointers() + bp := b.Type.HasPointers() if ap != bp { return ap } @@ -176,7 +176,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { } s.stksize += w s.stksize = Rnd(s.stksize, int64(n.Type.Align)) - if types.Haspointers(n.Type) { + if n.Type.HasPointers() { s.stkptrsize = s.stksize lastHasPtr = true } else { diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index 89b977de85..41f0808a1c 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -185,8 +185,8 @@ func TestStackvarSort(t 
*testing.T) { // exercise this function on all inputs so that reflect.DeepEqual // doesn't produce false positives. for i := range want { - types.Haspointers(want[i].Type) - types.Haspointers(inp[i].Type) + want[i].Type.HasPointers() + inp[i].Type.HasPointers() } sort.Sort(byStackVar(inp)) diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index bdb458015f..4681d8ea72 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -259,7 +259,7 @@ func (v *varRegVec) AndNot(v1, v2 varRegVec) { // nor do we care about empty structs (handled by the pointer check), // nor do we care about the fake PAUTOHEAP variables. func livenessShouldTrack(n *Node) bool { - return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && types.Haspointers(n.Type) + return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Type.HasPointers() } // getvariables returns the list of on-stack variables that we need to track diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 1cf0a0862f..d78a5f0d8d 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -334,7 +334,7 @@ func walkrange(n *Node) *Node { hv1 := temp(t.Elem()) hv1.SetTypecheck(1) - if types.Haspointers(t.Elem()) { + if t.Elem().HasPointers() { init = append(init, nod(OAS, hv1, nil)) } hb := temp(types.Types[TBOOL]) diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index f614b60685..2670baf999 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -119,7 +119,7 @@ func bmap(t *types.Type) *types.Type { // the type of the overflow field to uintptr in this case. // See comment on hmap.overflow in runtime/map.go. otyp := types.NewPtr(bucket) - if !types.Haspointers(elemtype) && !types.Haspointers(keytype) { + if !elemtype.HasPointers() && !keytype.HasPointers() { otyp = types.Types[TUINTPTR] } overflow := makefield("overflow", otyp) @@ -754,7 +754,7 @@ var kinds = []int{ // typeptrdata returns the length in bytes of the prefix of t // containing pointer data. Anything after this offset is scalar data. func typeptrdata(t *types.Type) int64 { - if !types.Haspointers(t) { + if !t.HasPointers() { return 0 } @@ -788,7 +788,7 @@ func typeptrdata(t *types.Type) int64 { // Find the last field that has pointers. 
var lastPtrField *types.Field for _, t1 := range t.Fields().Slice() { - if types.Haspointers(t1.Type) { + if t1.Type.HasPointers() { lastPtrField = t1 } } @@ -1726,7 +1726,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) { for i := range ptrmask { ptrmask[i] = 0 } - if !types.Haspointers(t) { + if !t.HasPointers() { return } @@ -1795,7 +1795,7 @@ func (p *GCProg) end() { func (p *GCProg) emit(t *types.Type, offset int64) { dowidth(t) - if !types.Haspointers(t) { + if !t.HasPointers() { return } if t.Width == int64(Widthptr) { diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 5d0098b4e6..3e21450deb 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4206,7 +4206,7 @@ func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false) addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false) } - if types.Haspointers(t) { + if t.HasPointers() { // Since we may use this argTemp during exit depending on the // deferBits, we must define it unconditionally on entry. // Therefore, we must make sure it is zeroed out in the entry @@ -4308,12 +4308,12 @@ func (s *state) openDeferExit() { s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false) } if r.rcvrNode != nil { - if types.Haspointers(r.rcvrNode.Type) { + if r.rcvrNode.Type.HasPointers() { s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false) } } for _, argNode := range r.argNodes { - if types.Haspointers(argNode.Type) { + if argNode.Type.HasPointers() { s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false) } } @@ -4953,7 +4953,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) { s.instrument(t, left, true) - if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) { + if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) { // Known to not have write barrier. Store the whole type. s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt) return @@ -4965,7 +4965,7 @@ func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, // TODO: if the writebarrier pass knows how to reorder stores, // we can do a single store here as long as skip==0. 
s.storeTypeScalars(t, left, right, skip) - if skip&skipPtr == 0 && types.Haspointers(t) { + if skip&skipPtr == 0 && t.HasPointers() { s.storeTypePtrs(t, left, right) } } @@ -5037,7 +5037,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { n := t.NumFields() for i := 0; i < n; i++ { ft := t.FieldType(i) - if !types.Haspointers(ft) { + if !ft.HasPointers() { continue } addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index b7cf313938..9291301f36 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -380,9 +380,9 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) { switch { case from.Size() == 2 && from.Align == 2: return "convT16", false - case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from): + case from.Size() == 4 && from.Align == 4 && !from.HasPointers(): return "convT32", false - case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from): + case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !from.HasPointers(): return "convT64", false } if sc := from.SoleComponent(); sc != nil { @@ -396,12 +396,12 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) { switch tkind { case 'E': - if !types.Haspointers(from) { + if !from.HasPointers() { return "convT2Enoptr", true } return "convT2E", true case 'I': - if !types.Haspointers(from) { + if !from.HasPointers() { return "convT2Inoptr", true } return "convT2I", true @@ -1405,7 +1405,7 @@ opswitch: copylen := nod(OLEN, n.Right, nil) copyptr := nod(OSPTR, n.Right, nil) - if !types.Haspointers(t.Elem()) && n.Bounded() { + if !t.Elem().HasPointers() && n.Bounded() { // When len(to)==len(from) and elements have no pointers: // replace make+copy with runtime.mallocgc+runtime.memmove. @@ -2855,7 +2855,7 @@ func isAppendOfMake(n *Node) bool { // s = s[:n] // lptr := &l1[0] // sptr := &s[0] -// if lptr == sptr || !hasPointers(T) { +// if lptr == sptr || !T.HasPointers() { // // growslice did not clear the whole underlying array (or did not get called) // hp := &s[len(l1)] // hn := l2 * sizeof(T) @@ -2936,7 +2936,7 @@ func extendslice(n *Node, init *Nodes) *Node { hn = conv(hn, types.Types[TUINTPTR]) clrname := "memclrNoHeapPointers" - hasPointers := types.Haspointers(elemtype) + hasPointers := elemtype.HasPointers() if hasPointers { clrname = "memclrHasPointers" Curfn.Func.setWBPos(n.Pos) diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 3b7b31c5d6..9c51b00f84 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -1400,11 +1400,11 @@ func (t *Type) IsUntyped() bool { // TODO(austin): We probably only need HasHeapPointer. See // golang.org/cl/73412 for discussion. 
-func Haspointers(t *Type) bool { - return Haspointers1(t, false) +func (t *Type) HasPointers() bool { + return t.hasPointers1(false) } -func Haspointers1(t *Type, ignoreNotInHeap bool) bool { +func (t *Type) hasPointers1(ignoreNotInHeap bool) bool { switch t.Etype { case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL, TSSA: @@ -1414,11 +1414,11 @@ func Haspointers1(t *Type, ignoreNotInHeap bool) bool { if t.NumElem() == 0 { // empty array has no pointers return false } - return Haspointers1(t.Elem(), ignoreNotInHeap) + return t.Elem().hasPointers1(ignoreNotInHeap) case TSTRUCT: for _, t1 := range t.Fields().Slice() { - if Haspointers1(t1.Type, ignoreNotInHeap) { + if t1.Type.hasPointers1(ignoreNotInHeap) { return true } } @@ -1429,7 +1429,7 @@ func Haspointers1(t *Type, ignoreNotInHeap bool) bool { case TTUPLE: ttup := t.Extra.(*Tuple) - return Haspointers1(ttup.first, ignoreNotInHeap) || Haspointers1(ttup.second, ignoreNotInHeap) + return ttup.first.hasPointers1(ignoreNotInHeap) || ttup.second.hasPointers1(ignoreNotInHeap) } return true @@ -1439,7 +1439,7 @@ func Haspointers1(t *Type, ignoreNotInHeap bool) bool { // This is used for write barrier insertion, so it ignores // pointers to go:notinheap types. func (t *Type) HasHeapPointer() bool { - return Haspointers1(t, true) + return t.hasPointers1(true) } func (t *Type) Symbol() *obj.LSym { @@ -1470,7 +1470,7 @@ func FakeRecvType() *Type { } var ( - // TSSA types. Haspointers assumes these are pointer-free. + // TSSA types. HasPointers assumes these are pointer-free. TypeInvalid = newSSA("invalid") TypeMem = newSSA("mem") TypeFlags = newSSA("flags") -- cgit v1.2.3-54-g00ecf From 439ce71eb6a472c5f758021a92f06d8489ca0689 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Fri, 21 Aug 2020 20:20:12 -0700 Subject: [release-branch.go1.15] cmd/compile: don't allow go:notinheap on the heap or stack Right now we just prevent such types from being on the heap. This CL makes it so they cannot appear on the stack either. The distinction between heap and stack is pretty vague at the language level (e.g. it is affected by -N), and we don't need the flexibility anyway. Once go:notinheap types cannot be in either place, we don't need to consider pointers to such types to be pointers, at least according to the garbage collector and stack copying. (This is the big win of this CL, in my opinion.) The distinction between HasPointers and HasHeapPointer no longer exists. There is only HasPointers. This CL is cleanup before possible use of go:notinheap to fix #40954. 
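As a rough illustration of what remains legal after this change: values
of a go:notinheap type can live only in globals or in memory that never
came from the Go heap or a Go stack. The type below is made up, and the
sketch relies on the directive being accepted outside the runtime, as
the tests later in this series do:

package main

import "fmt"

// descriptor stands in for a structure shared with hardware or a C
// library. After this CL it cannot be stack-allocated either; only
// globals or non-Go memory (C allocations, mmap, ...) may hold one.
//go:notinheap
type descriptor struct {
	head, tail uint32
}

var global descriptor // package-level storage is still allowed

// Pointers to notinheap types may still be passed around freely; the
// GC and the stack copier now ignore them entirely.
func advance(d *descriptor) {
	d.head++
}

func main() {
	advance(&global)
	fmt.Println(global.head) // 1
}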
Update #13386 Change-Id: Ibd895aadf001c0385078a6d4809c3f374991231a Reviewed-on: https://go-review.googlesource.com/c/go/+/255320 Trust: Keith Randall Reviewed-by: Austin Clements Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/escape.go | 3 +++ src/cmd/compile/internal/gc/pgen_test.go | 10 +--------- src/cmd/compile/internal/gc/plive.go | 14 +++++++------- src/cmd/compile/internal/gc/range.go | 2 +- src/cmd/compile/internal/gc/walk.go | 23 +++++++++++------------ src/cmd/compile/internal/ssa/decompose.go | 2 +- src/cmd/compile/internal/ssa/gen/dec.rules | 4 ++-- src/cmd/compile/internal/ssa/nilcheck.go | 2 +- src/cmd/compile/internal/ssa/rewritedec.go | 7 ++++--- src/cmd/compile/internal/ssa/writebarrier.go | 2 +- src/cmd/compile/internal/types/type.go | 24 ++++++------------------ src/runtime/export_test.go | 7 +++---- src/runtime/mgcmark.go | 3 ++- src/runtime/mgcstack.go | 2 -- src/runtime/runtime2.go | 2 -- test/notinheap2.go | 12 ++++++++++-- 16 files changed, 53 insertions(+), 66 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index ddf89f6159..d5cca4a38b 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -1029,6 +1029,9 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation { if e.curfn == nil { Fatalf("e.curfn isn't set") } + if n != nil && n.Type != nil && n.Type.NotInHeap() { + yyerrorl(n.Pos, "%v is go:notinheap; stack allocation disallowed", n.Type) + } n = canonicalNode(n) loc := &EscLocation{ diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index 41f0808a1c..b1db29825c 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -20,7 +20,7 @@ func typeWithoutPointers() *types.Type { func typeWithPointers() *types.Type { t := types.New(TSTRUCT) - f := &types.Field{Type: types.New(TPTR)} + f := &types.Field{Type: types.NewPtr(types.New(TINT))} t.SetFields([]*types.Field{f}) return t } @@ -181,14 +181,6 @@ func TestStackvarSort(t *testing.T) { nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO), nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO), } - // haspointers updates Type.Haspointers as a side effect, so - // exercise this function on all inputs so that reflect.DeepEqual - // doesn't produce false positives. 
- for i := range want { - want[i].Type.HasPointers() - inp[i].Type.HasPointers() - } - sort.Sort(byStackVar(inp)) if !reflect.DeepEqual(want, inp) { t.Error("sort failed") diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 4681d8ea72..09e5487f60 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -436,7 +436,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) { case ssa.LocalSlot: return mask case *ssa.Register: - if ptrOnly && !v.Type.HasHeapPointer() { + if ptrOnly && !v.Type.HasPointers() { return mask } regs[0] = loc @@ -451,7 +451,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) { if loc1 == nil { continue } - if ptrOnly && !v.Type.FieldType(i).HasHeapPointer() { + if ptrOnly && !v.Type.FieldType(i).HasPointers() { continue } regs[nreg] = loc1.(*ssa.Register) @@ -568,13 +568,13 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) { if t.Align > 0 && off&int64(t.Align-1) != 0 { Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) } + if !t.HasPointers() { + // Note: this case ensures that pointers to go:notinheap types + // are not considered pointers by garbage collection and stack copying. + return + } switch t.Etype { - case TINT8, TUINT8, TINT16, TUINT16, - TINT32, TUINT32, TINT64, TUINT64, - TINT, TUINT, TUINTPTR, TBOOL, - TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128: - case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP: if off&int64(Widthptr-1) != 0 { Fatalf("onebitwalktype1: invalid alignment, %v", t) diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index d78a5f0d8d..5434b0167a 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -586,7 +586,7 @@ func arrayClear(n, v1, v2, a *Node) bool { n.Nbody.Append(nod(OAS, hn, tmp)) var fn *Node - if a.Type.Elem().HasHeapPointer() { + if a.Type.Elem().HasPointers() { // memclrHasPointers(hp, hn) Curfn.Func.setWBPos(stmt.Pos) fn = mkcall("memclrHasPointers", nil, nil, hp, hn) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 9291301f36..8dd1277705 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1151,6 +1151,9 @@ opswitch: } case ONEW: + if n.Type.Elem().NotInHeap() { + yyerror("%v is go:notinheap; heap allocation disallowed", n.Type.Elem()) + } if n.Esc == EscNone { if n.Type.Elem().Width >= maxImplicitStackVarSize { Fatalf("large ONEW with EscNone: %v", n) @@ -1319,6 +1322,9 @@ opswitch: l = r } t := n.Type + if t.Elem().NotInHeap() { + yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem()) + } if n.Esc == EscNone { if !isSmallMakeSlice(n) { Fatalf("non-small OMAKESLICE with EscNone: %v", n) @@ -1360,10 +1366,6 @@ opswitch: // When len and cap can fit into int, use makeslice instead of // makeslice64, which is faster and shorter on 32 bit platforms. 
- if t.Elem().NotInHeap() { - yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem()) - } - len, cap := l, r fnname := "makeslice64" @@ -1398,7 +1400,7 @@ opswitch: t := n.Type if t.Elem().NotInHeap() { - Fatalf("%v is go:notinheap; heap allocation disallowed", t.Elem()) + yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem()) } length := conv(n.Left, types.Types[TINT]) @@ -2002,9 +2004,6 @@ func walkprint(nn *Node, init *Nodes) *Node { } func callnew(t *types.Type) *Node { - if t.NotInHeap() { - yyerror("%v is go:notinheap; heap allocation disallowed", t) - } dowidth(t) n := nod(ONEWOBJ, typename(t), nil) n.Type = types.NewPtr(t) @@ -2579,7 +2578,7 @@ func mapfast(t *types.Type) int { } switch algtype(t.Key()) { case AMEM32: - if !t.Key().HasHeapPointer() { + if !t.Key().HasPointers() { return mapfast32 } if Widthptr == 4 { @@ -2587,7 +2586,7 @@ func mapfast(t *types.Type) int { } Fatalf("small pointer %v", t.Key()) case AMEM64: - if !t.Key().HasHeapPointer() { + if !t.Key().HasPointers() { return mapfast64 } if Widthptr == 8 { @@ -2734,7 +2733,7 @@ func appendslice(n *Node, init *Nodes) *Node { nodes.Append(nod(OAS, s, nt)) var ncopy *Node - if elemtype.HasHeapPointer() { + if elemtype.HasPointers() { // copy(s[len(l1):], l2) nptr1 := nod(OSLICE, s, nil) nptr1.Type = s.Type @@ -3072,7 +3071,7 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node { // Also works if b is a string. // func copyany(n *Node, init *Nodes, runtimecall bool) *Node { - if n.Left.Type.Elem().HasHeapPointer() { + if n.Left.Type.Elem().HasPointers() { Curfn.Func.setWBPos(n.Pos) fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem()) n.Left = cheapexpr(n.Left, init) diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go index c59ec4c77d..6e72e3825c 100644 --- a/src/cmd/compile/internal/ssa/decompose.go +++ b/src/cmd/compile/internal/ssa/decompose.go @@ -139,7 +139,7 @@ func decomposeStringPhi(v *Value) { func decomposeSlicePhi(v *Value) { types := &v.Block.Func.Config.Types - ptrType := types.BytePtr + ptrType := v.Type.Elem().PtrTo() lenType := types.Int ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType) diff --git a/src/cmd/compile/internal/ssa/gen/dec.rules b/src/cmd/compile/internal/ssa/gen/dec.rules index 3fd2be409f..4c677f8418 100644 --- a/src/cmd/compile/internal/ssa/gen/dec.rules +++ b/src/cmd/compile/internal/ssa/gen/dec.rules @@ -66,14 +66,14 @@ (Load (OffPtr [2*config.PtrSize] ptr) mem)) -(Store dst (SliceMake ptr len cap) mem) => +(Store {t} dst (SliceMake ptr len cap) mem) => (Store {typ.Int} (OffPtr [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr [config.PtrSize] dst) len - (Store {typ.BytePtr} dst ptr mem))) + (Store {t.Elem().PtrTo()} dst ptr mem))) // interface ops (ITab (IMake itab _)) => itab diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go index 6b24371ac7..d1bad529e7 100644 --- a/src/cmd/compile/internal/ssa/nilcheck.go +++ b/src/cmd/compile/internal/ssa/nilcheck.go @@ -235,7 +235,7 @@ func nilcheckelim2(f *Func) { continue } if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() { - if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasHeapPointer()) { + if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasPointers()) { // These ops don't really change memory. continue // Note: OpVarDef requires that the defined variable not have pointers. 
diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go index cef781ffaa..e0fa9768d9 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -328,9 +328,10 @@ func rewriteValuedec_OpStore(v *Value) bool { v.AddArg3(v0, len, v1) return true } - // match: (Store dst (SliceMake ptr len cap) mem) - // result: (Store {typ.Int} (OffPtr [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem))) + // match: (Store {t} dst (SliceMake ptr len cap) mem) + // result: (Store {typ.Int} (OffPtr [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr [config.PtrSize] dst) len (Store {t.Elem().PtrTo()} dst ptr mem))) for { + t := auxToType(v.Aux) dst := v_0 if v_1.Op != OpSliceMake { break @@ -350,7 +351,7 @@ func rewriteValuedec_OpStore(v *Value) bool { v2.AuxInt = int64ToAuxInt(config.PtrSize) v2.AddArg(dst) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) - v3.Aux = typeToAux(typ.BytePtr) + v3.Aux = typeToAux(t.Elem().PtrTo()) v3.AddArg3(dst, ptr, mem) v1.AddArg3(v2, len, v3) v.AddArg3(v0, cap, v1) diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index c7fb059475..214798a1ab 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -31,7 +31,7 @@ func needwb(v *Value, zeroes map[ID]ZeroRegion) bool { if !ok { v.Fatalf("store aux is not a type: %s", v.LongString()) } - if !t.HasHeapPointer() { + if !t.HasPointers() { return false } if IsStackAddr(v.Args[0]) { diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 9c51b00f84..7d9becfa00 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -1397,14 +1397,9 @@ func (t *Type) IsUntyped() bool { return false } -// TODO(austin): We probably only need HasHeapPointer. See -// golang.org/cl/73412 for discussion. - +// HasPointers reports whether t contains a heap pointer. +// Note that this function ignores pointers to go:notinheap types. func (t *Type) HasPointers() bool { - return t.hasPointers1(false) -} - -func (t *Type) hasPointers1(ignoreNotInHeap bool) bool { switch t.Etype { case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL, TSSA: @@ -1414,34 +1409,27 @@ func (t *Type) hasPointers1(ignoreNotInHeap bool) bool { if t.NumElem() == 0 { // empty array has no pointers return false } - return t.Elem().hasPointers1(ignoreNotInHeap) + return t.Elem().HasPointers() case TSTRUCT: for _, t1 := range t.Fields().Slice() { - if t1.Type.hasPointers1(ignoreNotInHeap) { + if t1.Type.HasPointers() { return true } } return false case TPTR, TSLICE: - return !(ignoreNotInHeap && t.Elem().NotInHeap()) + return !t.Elem().NotInHeap() case TTUPLE: ttup := t.Extra.(*Tuple) - return ttup.first.hasPointers1(ignoreNotInHeap) || ttup.second.hasPointers1(ignoreNotInHeap) + return ttup.first.HasPointers() || ttup.second.HasPointers() } return true } -// HasHeapPointer reports whether t contains a heap pointer. -// This is used for write barrier insertion, so it ignores -// pointers to go:notinheap types. 
-func (t *Type) HasHeapPointer() bool { - return t.hasPointers1(true) -} - func (t *Type) Symbol() *obj.LSym { return TypeLinkSym(t) } diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index e43813bcc4..b268d5a058 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -986,9 +986,8 @@ func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) { } func MSpanCountAlloc(bits []byte) int { - s := mspan{ - nelems: uintptr(len(bits) * 8), - gcmarkBits: (*gcBits)(unsafe.Pointer(&bits[0])), - } + s := (*mspan)(mheap_.spanalloc.alloc()) + s.nelems = uintptr(len(bits) * 8) + s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0])) return s.countAlloc() } diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index fe988c46d9..b2fbcd4105 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -937,7 +937,8 @@ func scanstack(gp *g, gcw *gcWork) { x := state.head state.head = x.next if stackTraceDebug { - for _, obj := range x.obj[:x.nobj] { + for i := 0; i < x.nobj; i++ { + obj := &x.obj[i] if obj.typ == nil { // reachable continue } diff --git a/src/runtime/mgcstack.go b/src/runtime/mgcstack.go index 211d882fa6..8eb941a328 100644 --- a/src/runtime/mgcstack.go +++ b/src/runtime/mgcstack.go @@ -167,8 +167,6 @@ func (obj *stackObject) setType(typ *_type) { // A stackScanState keeps track of the state used during the GC walk // of a goroutine. -// -//go:notinheap type stackScanState struct { cache pcvalueCache diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 0d0cac240b..814364aa42 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -918,8 +918,6 @@ type _defer struct { // handling during stack growth: because they are pointer-typed and // _panic values only live on the stack, regular stack pointer // adjustment takes care of them. -// -//go:notinheap type _panic struct { argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink arg interface{} // argument to panic diff --git a/test/notinheap2.go b/test/notinheap2.go index 944f2993ab..de1e6db1d3 100644 --- a/test/notinheap2.go +++ b/test/notinheap2.go @@ -13,12 +13,14 @@ type nih struct { next *nih } -// Globals and stack variables are okay. +// Global variables are okay. var x nih +// Stack variables are not okay. + func f() { - var y nih + var y nih // ERROR "nih is go:notinheap; stack allocation disallowed" x = y } @@ -26,11 +28,17 @@ func f() { var y *nih var z []nih +var w []nih +var n int func g() { y = new(nih) // ERROR "heap allocation disallowed" z = make([]nih, 1) // ERROR "heap allocation disallowed" z = append(z, x) // ERROR "heap allocation disallowed" + // Test for special case of OMAKESLICECOPY + x := make([]nih, n) // ERROR "heap allocation disallowed" + copy(x, z) + z = x } // Writes don't produce write barriers. -- cgit v1.2.3-54-g00ecf From 142027888a7d3da0494d77581166f1d120317864 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Wed, 26 Aug 2020 14:07:35 -0700 Subject: [release-branch.go1.15] cmd/compile: allow aliases to go:notinheap types The alias doesn't need to be marked go:notinheap. It gets its notinheap-ness from the target type. Without this change, the type alias test in the notinheap.go file generates these two errors: notinheap.go:62: misplaced compiler directive notinheap.go:63: type nih must be go:notinheap The first is a result of go:notinheap pragmas not applying to type alias declarations. 
The second is the result of the compiler then trying to match the notinheap-ness of the alias and the target type.

Add a few more go:notinheap tests while we are here.

Update #40954

Change-Id: I067ec47698df6e9e593e080d67796fd05a1d480f
Reviewed-on: https://go-review.googlesource.com/c/go/+/250939
Run-TryBot: Keith Randall
TryBot-Result: Go Bot
Reviewed-by: Emmanuel Odeke
Trust: Keith Randall
Reviewed-on: https://go-review.googlesource.com/c/go/+/255337
Trust: Emmanuel Odeke
Reviewed-by: Austin Clements
---
 src/cmd/compile/internal/gc/typecheck.go | 2 +-
 test/notinheap.go | 8 ++++++++
 test/notinheap2.go | 10 +++++++---
 3 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index dec4b96fc4..b29f5ef5ec 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -2068,7 +2068,7 @@ func typecheck1(n *Node, top int) (res *Node) {
 		ok |= ctxStmt
 		n.Left = typecheck(n.Left, ctxType)
 		checkwidth(n.Left.Type)
-		if n.Left.Type != nil && n.Left.Type.NotInHeap() && n.Left.Name.Param.Pragma&NotInHeap == 0 {
+		if n.Left.Type != nil && n.Left.Type.NotInHeap() && !n.Left.Name.Param.Alias && n.Left.Name.Param.Pragma&NotInHeap == 0 {
 			// The type contains go:notinheap types, so it
 			// must be marked as such (alternatively, we
 			// could silently propagate go:notinheap).
diff --git a/test/notinheap.go b/test/notinheap.go
index 16c3f8faf0..a2284a5068 100644
--- a/test/notinheap.go
+++ b/test/notinheap.go
@@ -52,6 +52,14 @@ type t3 byte
 //go:notinheap
 type t4 rune
 
+// Type aliases inherit the go:notinheap-ness of the type they alias.
+type nihAlias = nih
+
+type embedAlias1 struct { // ERROR "must be go:notinheap"
+	x nihAlias
+}
+type embedAlias2 [1]nihAlias // ERROR "must be go:notinheap"
+
 var sink interface{}
 
 func i() {
diff --git a/test/notinheap2.go b/test/notinheap2.go
index de1e6db1d3..09d0fc0b7b 100644
--- a/test/notinheap2.go
+++ b/test/notinheap2.go
@@ -27,14 +27,18 @@ func f() {
 // Heap allocation is not okay.
 
 var y *nih
+var y2 *struct{ x nih }
+var y3 *[1]nih
 var z []nih
 var w []nih
 var n int
 
 func g() {
-	y = new(nih) // ERROR "heap allocation disallowed"
-	z = make([]nih, 1) // ERROR "heap allocation disallowed"
-	z = append(z, x) // ERROR "heap allocation disallowed"
+	y = new(nih) // ERROR "heap allocation disallowed"
+	y2 = new(struct{ x nih }) // ERROR "heap allocation disallowed"
+	y3 = new([1]nih) // ERROR "heap allocation disallowed"
+	z = make([]nih, 1) // ERROR "heap allocation disallowed"
+	z = append(z, x) // ERROR "heap allocation disallowed"
 	// Test for special case of OMAKESLICECOPY
 	x := make([]nih, n) // ERROR "heap allocation disallowed"
 	copy(x, z)
-- 
cgit v1.2.3-54-g00ecf

From 96983721d42e9e238ea9fec720849ab7bb616122 Mon Sep 17 00:00:00 2001
From: Keith Randall
Date: Wed, 26 Aug 2020 14:17:35 -0700
Subject: [release-branch.go1.15] cmd/cgo: use go:notinheap for anonymous
 structs

They can't reasonably be allocated on the heap. Not a huge deal, but
it has an interesting and useful side effect.

After CL 249917, the compiler and runtime treat pointers to
go:notinheap types as uintptrs instead of real pointers (no write
barrier, not processed during stack scanning, ...). That feature is
exactly what we want for cgo to fix #40954. All the cases we have of
pointers declared in C, but which might actually be filled with
non-pointer data, are of this form (JNI's jobject hierarchy, Darwin's
CFType hierarchy, ...).
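A minimal cgo sketch of the pattern this enables (the C typedef is
hypothetical; build with cgo). Because the anonymous struct behind the
incomplete type is now go:notinheap, a *C.struct_S may legally carry
non-pointer data:

package main

/*
// A hypothetical opaque handle, in the style of JNI's jobject or
// Darwin's CFTypeRef: incomplete in C, so Go cannot know its layout.
typedef struct S S;
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	var i int
	// Some C libraries store plain integers in pointer-typed handles.
	// Since C.struct_S is go:notinheap, the runtime treats h as opaque:
	// no write barriers, no adjustment during stack copying.
	h := (*C.struct_S)(unsafe.Pointer(uintptr(unsafe.Pointer(&i))))
	fmt.Println(uintptr(unsafe.Pointer(h)) == uintptr(unsafe.Pointer(&i))) // true
}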
Fixes #40954 Change-Id: I44a3b9bc2513d4287107e39d0cbbd0efd46a3aae Reviewed-on: https://go-review.googlesource.com/c/go/+/250940 Run-TryBot: Emmanuel Odeke TryBot-Result: Go Bot Trust: Keith Randall Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/c/go/+/255321 --- src/cmd/cgo/gcc.go | 15 +++++++++++++++ src/cmd/cgo/main.go | 3 ++- src/cmd/cgo/out.go | 3 +++ test/fixedbugs/issue40954.go | 35 +++++++++++++++++++++++++++++++++++ 4 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 test/fixedbugs/issue40954.go diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index a59534ebd0..e512d51fda 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -2434,6 +2434,18 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ tt := *t tt.C = &TypeRepr{"%s %s", []interface{}{dt.Kind, tag}} tt.Go = c.Ident("struct{}") + if dt.Kind == "struct" { + // We don't know what the representation of this struct is, so don't let + // anyone allocate one on the Go side. As a side effect of this annotation, + // pointers to this type will not be considered pointers in Go. They won't + // get writebarrier-ed or adjusted during a stack copy. This should handle + // all the cases badPointerTypedef used to handle, but hopefully will + // continue to work going forward without any more need for cgo changes. + tt.NotInHeap = true + // TODO: we should probably do the same for unions. Unions can't live + // on the Go heap, right? It currently doesn't work for unions because + // they are defined as a type alias for struct{}, not a defined type. + } typedef[name.Name] = &tt break } @@ -2504,6 +2516,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ } t.Go = name t.BadPointer = sub.BadPointer + t.NotInHeap = sub.NotInHeap if unionWithPointer[sub.Go] { unionWithPointer[t.Go] = true } @@ -2514,6 +2527,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ tt := *t tt.Go = sub.Go tt.BadPointer = sub.BadPointer + tt.NotInHeap = sub.NotInHeap typedef[name.Name] = &tt } @@ -3022,6 +3036,7 @@ func (c *typeConv) anonymousStructTypedef(dt *dwarf.TypedefType) bool { // non-pointers in this type. // TODO: Currently our best solution is to find these manually and list them as // they come up. A better solution is desired. +// Note: DEPRECATED. There is now a better solution. Search for NotInHeap in this file. func (c *typeConv) badPointerTypedef(dt *dwarf.TypedefType) bool { if c.badCFType(dt) { return true diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go index 890daf65c4..c72c9abcc1 100644 --- a/src/cmd/cgo/main.go +++ b/src/cmd/cgo/main.go @@ -151,7 +151,8 @@ type Type struct { Go ast.Expr EnumValues map[string]int64 Typedef string - BadPointer bool + BadPointer bool // this pointer type should be represented as a uintptr (deprecated) + NotInHeap bool // this type should have a go:notinheap annotation } // A FuncType collects information about a function type in both the C and Go worlds. diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index 4064f0ae41..cbdb4e0f5b 100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -108,6 +108,9 @@ func (p *Package) writeDefs() { sort.Strings(typedefNames) for _, name := range typedefNames { def := typedef[name] + if def.NotInHeap { + fmt.Fprintf(fgo2, "//go:notinheap\n") + } fmt.Fprintf(fgo2, "type %s ", name) // We don't have source info for these types, so write them out without source info. 
// Otherwise types would look like: diff --git a/test/fixedbugs/issue40954.go b/test/fixedbugs/issue40954.go new file mode 100644 index 0000000000..53e9ccf387 --- /dev/null +++ b/test/fixedbugs/issue40954.go @@ -0,0 +1,35 @@ +// run + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "unsafe" +) + +//go:notinheap +type S struct{ x int } + +func main() { + var i int + p := (*S)(unsafe.Pointer(uintptr(unsafe.Pointer(&i)))) + v := uintptr(unsafe.Pointer(p)) + // p is a pointer to a go:notinheap type. Like some C libraries, + // we stored an integer in that pointer. That integer just happens + // to be the address of i. + // v is also the address of i. + // p has a base type which is marked go:notinheap, so it + // should not be adjusted when the stack is copied. + recurse(100, p, v) +} +func recurse(n int, p *S, v uintptr) { + if n > 0 { + recurse(n-1, p, v) + } + if uintptr(unsafe.Pointer(p)) != v { + panic("adjusted notinheap pointer") + } +} -- cgit v1.2.3-54-g00ecf From 46a6fd080635001d7f9fbd8118094f0aa21661a0 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 27 Aug 2020 14:05:52 -0700 Subject: [release-branch.go1.15] cmd/compile: make go:notinheap error message friendlier for cgo Update #40954 Change-Id: Ifaab7349631ccb12fc892882bbdf7f0ebf3d845f Reviewed-on: https://go-review.googlesource.com/c/go/+/251158 Run-TryBot: Keith Randall Reviewed-by: Ian Lance Taylor TryBot-Result: Go Bot Trust: Keith Randall Reviewed-on: https://go-review.googlesource.com/c/go/+/255338 Reviewed-by: Austin Clements --- src/cmd/compile/internal/gc/escape.go | 2 +- src/cmd/compile/internal/gc/subr.go | 4 ++-- src/cmd/compile/internal/gc/typecheck.go | 6 +++--- src/cmd/compile/internal/gc/walk.go | 8 ++++---- test/notinheap.go | 14 +++++++------- test/notinheap2.go | 14 +++++++------- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index d5cca4a38b..08000dd374 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -1030,7 +1030,7 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation { Fatalf("e.curfn isn't set") } if n != nil && n.Type != nil && n.Type.NotInHeap() { - yyerrorl(n.Pos, "%v is go:notinheap; stack allocation disallowed", n.Type) + yyerrorl(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type) } n = canonicalNode(n) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 9362c74288..c82eefb63d 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -696,14 +696,14 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op { // (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't. if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() { if why != nil { - *why = fmt.Sprintf(":\n\t%v is go:notinheap, but %v is not", dst.Elem(), src.Elem()) + *why = fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem()) } return OXXX } // (b) Disallow string to []T where T is go:notinheap. 
if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) { if why != nil { - *why = fmt.Sprintf(":\n\t%v is go:notinheap", dst.Elem()) + *why = fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem()) } return OXXX } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index b29f5ef5ec..56a03f1931 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -471,10 +471,10 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if l.Type.NotInHeap() { - yyerror("go:notinheap map key not allowed") + yyerror("incomplete (or unallocatable) map key not allowed") } if r.Type.NotInHeap() { - yyerror("go:notinheap map value not allowed") + yyerror("incomplete (or unallocatable) map value not allowed") } setTypeNode(n, types.NewMap(l.Type, r.Type)) @@ -491,7 +491,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n } if l.Type.NotInHeap() { - yyerror("chan of go:notinheap type not allowed") + yyerror("chan of incomplete (or unallocatable) type not allowed") } setTypeNode(n, types.NewChan(l.Type, n.TChanDir())) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 8dd1277705..af6602f70c 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -640,7 +640,7 @@ opswitch: // x = append(...) r := n.Right if r.Type.Elem().NotInHeap() { - yyerror("%v is go:notinheap; heap allocation disallowed", r.Type.Elem()) + yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem()) } switch { case isAppendOfMake(r): @@ -1152,7 +1152,7 @@ opswitch: case ONEW: if n.Type.Elem().NotInHeap() { - yyerror("%v is go:notinheap; heap allocation disallowed", n.Type.Elem()) + yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem()) } if n.Esc == EscNone { if n.Type.Elem().Width >= maxImplicitStackVarSize { @@ -1323,7 +1323,7 @@ opswitch: } t := n.Type if t.Elem().NotInHeap() { - yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem()) + yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) } if n.Esc == EscNone { if !isSmallMakeSlice(n) { @@ -1400,7 +1400,7 @@ opswitch: t := n.Type if t.Elem().NotInHeap() { - yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem()) + yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) } length := conv(n.Left, types.Types[TINT]) diff --git a/test/notinheap.go b/test/notinheap.go index a2284a5068..5dd4997a65 100644 --- a/test/notinheap.go +++ b/test/notinheap.go @@ -23,11 +23,11 @@ type embed3 struct { // ERROR "must be go:notinheap" x [1]nih } -type embed4 map[nih]int // ERROR "go:notinheap map key not allowed" +type embed4 map[nih]int // ERROR "incomplete \(or unallocatable\) map key not allowed" -type embed5 map[int]nih // ERROR "go:notinheap map value not allowed" +type embed5 map[int]nih // ERROR "incomplete \(or unallocatable\) map value not allowed" -type emebd6 chan nih // ERROR "chan of go:notinheap type not allowed" +type emebd6 chan nih // ERROR "chan of incomplete \(or unallocatable\) type not allowed" type okay1 *nih @@ -64,8 +64,8 @@ var sink interface{} func i() { sink = new(t1) // no error - sink = (*t2)(new(t1)) // ERROR "cannot convert(.|\n)*t2 is go:notinheap" - sink = (*t2)(new(struct{ x int })) // ERROR "cannot convert(.|\n)*t2 is go:notinheap" - 
sink = []t3("foo") // ERROR "cannot convert(.|\n)*t3 is go:notinheap"
-	sink = []t4("bar") // ERROR "cannot convert(.|\n)*t4 is go:notinheap"
+	sink = (*t2)(new(t1)) // ERROR "cannot convert(.|\n)*t2 is incomplete \(or unallocatable\)"
+	sink = (*t2)(new(struct{ x int })) // ERROR "cannot convert(.|\n)*t2 is incomplete \(or unallocatable\)"
+	sink = []t3("foo") // ERROR "cannot convert(.|\n)*t3 is incomplete \(or unallocatable\)"
+	sink = []t4("bar") // ERROR "cannot convert(.|\n)*t4 is incomplete \(or unallocatable\)"
 }
diff --git a/test/notinheap2.go b/test/notinheap2.go
index 09d0fc0b7b..23d4b0ae77 100644
--- a/test/notinheap2.go
+++ b/test/notinheap2.go
@@ -20,7 +20,7 @@ var x nih
 // Stack variables are not okay.
 
 func f() {
-	var y nih // ERROR "nih is go:notinheap; stack allocation disallowed"
+	var y nih // ERROR "nih is incomplete \(or unallocatable\); stack allocation disallowed"
 	x = y
 }
 
@@ -34,13 +34,13 @@ var w []nih
 var n int
 
 func g() {
-	y = new(nih) // ERROR "heap allocation disallowed"
-	y2 = new(struct{ x nih }) // ERROR "heap allocation disallowed"
-	y3 = new([1]nih) // ERROR "heap allocation disallowed"
-	z = make([]nih, 1) // ERROR "heap allocation disallowed"
-	z = append(z, x) // ERROR "heap allocation disallowed"
+	y = new(nih) // ERROR "can't be allocated in Go"
+	y2 = new(struct{ x nih }) // ERROR "can't be allocated in Go"
+	y3 = new([1]nih) // ERROR "can't be allocated in Go"
+	z = make([]nih, 1) // ERROR "can't be allocated in Go"
+	z = append(z, x) // ERROR "can't be allocated in Go"
 	// Test for special case of OMAKESLICECOPY
-	x := make([]nih, n) // ERROR "heap allocation disallowed"
+	x := make([]nih, n) // ERROR "can't be allocated in Go"
 	copy(x, z)
 	z = x
 }
-- 
cgit v1.2.3-54-g00ecf

From cfeb16ddec5b2134198dcc029cdd501ed11a7c01 Mon Sep 17 00:00:00 2001
From: Keith Randall
Date: Thu, 17 Sep 2020 09:55:23 -0700
Subject: [release-branch.go1.15] cmd/compile: propagate go:notinheap
 implicitly

//go:notinheap
type T int
type U T

We already correctly propagate the notinheap-ness of T to U. But we
have an assertion in the typechecker that reports an error if there's
no explicit //go:notinheap associated with U.

Get rid of that error so that implicit propagation is allowed.

Adjust the tests so that we make sure that uses of types like U
do correctly report an error when U is used in a context that
might cause a Go heap allocation.
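In the errorcheck style of test/notinheap2.go, a hypothetical companion
test (not part of this CL) showing the effect: the declaration of U now
compiles without any directive, while allocating a U still fails:

// errorcheck

package p

//go:notinheap
type T int

type U T // notinheap-ness propagates implicitly; no directive, no error

var t *T
var u *U

func f() {
	t = new(T) // ERROR "can't be allocated in Go"
	u = new(U) // ERROR "can't be allocated in Go"
}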
Update #41432 Change-Id: I1692bc7cceff21ebb3f557f3748812a40887118d Reviewed-on: https://go-review.googlesource.com/c/go/+/255637 Run-TryBot: Keith Randall Reviewed-by: Cuong Manh Le Reviewed-by: Ian Lance Taylor Trust: Cuong Manh Le TryBot-Result: Go Bot (cherry picked from commit 22053790fa2c0944df53ea95df476ad2f855424f) Reviewed-on: https://go-review.googlesource.com/c/go/+/255697 Trust: Keith Randall Reviewed-by: Austin Clements --- src/cmd/compile/internal/gc/typecheck.go | 6 ------ test/notinheap.go | 20 -------------------- test/notinheap2.go | 26 ++++++++++++++++++++++++++ 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 56a03f1931..98b52a506a 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2068,12 +2068,6 @@ func typecheck1(n *Node, top int) (res *Node) { ok |= ctxStmt n.Left = typecheck(n.Left, ctxType) checkwidth(n.Left.Type) - if n.Left.Type != nil && n.Left.Type.NotInHeap() && !n.Left.Name.Param.Alias && n.Left.Name.Param.Pragma&NotInHeap == 0 { - // The type contains go:notinheap types, so it - // must be marked as such (alternatively, we - // could silently propagate go:notinheap). - yyerror("type %v must be go:notinheap", n.Left.Type) - } } t := n.Type diff --git a/test/notinheap.go b/test/notinheap.go index 5dd4997a65..2188a38a14 100644 --- a/test/notinheap.go +++ b/test/notinheap.go @@ -11,18 +11,6 @@ package p //go:notinheap type nih struct{} -// Types embedding notinheap types must be notinheap. - -type embed1 struct { // ERROR "must be go:notinheap" - x nih -} - -type embed2 [1]nih // ERROR "must be go:notinheap" - -type embed3 struct { // ERROR "must be go:notinheap" - x [1]nih -} - type embed4 map[nih]int // ERROR "incomplete \(or unallocatable\) map key not allowed" type embed5 map[int]nih // ERROR "incomplete \(or unallocatable\) map value not allowed" @@ -52,14 +40,6 @@ type t3 byte //go:notinheap type t4 rune -// Type aliases inherit the go:notinheap-ness of the type they alias. -type nihAlias = nih - -type embedAlias1 struct { // ERROR "must be go:notinheap" - x nihAlias -} -type embedAlias2 [1]nihAlias // ERROR "must be go:notinheap" - var sink interface{} func i() { diff --git a/test/notinheap2.go b/test/notinheap2.go index 23d4b0ae77..100ed37b72 100644 --- a/test/notinheap2.go +++ b/test/notinheap2.go @@ -32,6 +32,25 @@ var y3 *[1]nih var z []nih var w []nih var n int +var sink interface{} + +type embed1 struct { // implicitly notinheap + x nih +} + +type embed2 [1]nih // implicitly notinheap + +type embed3 struct { // implicitly notinheap + x [1]nih +} + +// Type aliases inherit the go:notinheap-ness of the type they alias. 
+type nihAlias = nih + +type embedAlias1 struct { // implicitly notinheap + x nihAlias +} +type embedAlias2 [1]nihAlias // implicitly notinheap func g() { y = new(nih) // ERROR "can't be allocated in Go" @@ -39,6 +58,13 @@ func g() { y3 = new([1]nih) // ERROR "can't be allocated in Go" z = make([]nih, 1) // ERROR "can't be allocated in Go" z = append(z, x) // ERROR "can't be allocated in Go" + + sink = new(embed1) // ERROR "can't be allocated in Go" + sink = new(embed2) // ERROR "can't be allocated in Go" + sink = new(embed3) // ERROR "can't be allocated in Go" + sink = new(embedAlias1) // ERROR "can't be allocated in Go" + sink = new(embedAlias2) // ERROR "can't be allocated in Go" + // Test for special case of OMAKESLICECOPY x := make([]nih, n) // ERROR "can't be allocated in Go" copy(x, z) -- cgit v1.2.3-54-g00ecf From 76a2c87a2c4e78f40f0f70bda3da93c773630179 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Fri, 2 Oct 2020 16:04:12 -0700 Subject: [release-branch.go1.15] cmd/compile: export notinheap annotation to object file In the rare case when a cgo type makes it into an object file, we need the go:notinheap annotation to go with it. Fixes #41432. Change-Id: Ie2ef241ee49661792e0d8c8c46c51b2fe5c6fa7c Reviewed-on: https://go-review.googlesource.com/c/go/+/259300 Trust: Keith Randall Reviewed-by: Ian Lance Taylor Reviewed-by: Cherry Zhang --- misc/cgo/test/testdata/issue41761.go | 20 ++++++++++++++++++++ misc/cgo/test/testdata/issue41761a/a.go | 14 ++++++++++++++ src/cmd/compile/internal/gc/iexport.go | 6 ++++++ src/cmd/compile/internal/gc/iimport.go | 5 +++++ 4 files changed, 45 insertions(+) create mode 100644 misc/cgo/test/testdata/issue41761.go create mode 100644 misc/cgo/test/testdata/issue41761a/a.go diff --git a/misc/cgo/test/testdata/issue41761.go b/misc/cgo/test/testdata/issue41761.go new file mode 100644 index 0000000000..919c749251 --- /dev/null +++ b/misc/cgo/test/testdata/issue41761.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +/* + typedef struct S S; +*/ +import "C" + +import ( + "cgotest/issue41761a" + "testing" +) + +func test41761(t *testing.T) { + var x issue41761a.T + _ = (*C.struct_S)(x.X) +} diff --git a/misc/cgo/test/testdata/issue41761a/a.go b/misc/cgo/test/testdata/issue41761a/a.go new file mode 100644 index 0000000000..ca5c18191e --- /dev/null +++ b/misc/cgo/test/testdata/issue41761a/a.go @@ -0,0 +1,14 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue41761a + +/* + typedef struct S S; +*/ +import "C" + +type T struct { + X *C.S +} diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 35b8d985cb..81eb975b08 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -492,6 +492,7 @@ func (p *iexporter) doDecl(n *Node) { w.signature(m.Type) } + w.typeExt(t) for _, m := range ms.Slice() { w.methExt(m) } @@ -1012,6 +1013,11 @@ func (w *exportWriter) symIdx(s *types.Sym) { } } +func (w *exportWriter) typeExt(t *types.Type) { + // Export whether this type is marked notinheap. + w.bool(t.NotInHeap()) +} + // Inline bodies. 
func (w *exportWriter) stmtList(list Nodes) { diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 104b5fb79a..d9148eae22 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -346,6 +346,7 @@ func (r *importReader) doDecl(n *Node) { } t.Methods().Set(ms) + r.typeExt(t) for _, m := range ms { r.methExt(m) } @@ -710,6 +711,10 @@ func (r *importReader) symIdx(s *types.Sym) { } } +func (r *importReader) typeExt(t *types.Type) { + t.SetNotInHeap(r.bool()) +} + func (r *importReader) doInline(n *Node) { if len(n.Func.Inl.Body) != 0 { Fatalf("%v already has inline body", n) -- cgit v1.2.3-54-g00ecf From 8b224e9951438283ae53ae35dc2a50a56fccc404 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Sat, 3 Oct 2020 16:44:22 -0400 Subject: [release-branch.go1.15] runtime: correct signature of call16 The signature of call16 is currently missing the "typ" parameter. This CL fixes this. This wasn't caught by vet because call16 is defined by macro expansion (see #17544), and we didn't notice the mismatch with the other call* functions because call16 is defined only on 32-bit architectures and lives alone in stubs32.go. Unfortunately, this means its GC signature is also wrong: the "arg" parameter is treated as a scalar rather than a pointer, so GC won't trace it and stack copying won't adjust it. This turns out to matter in exactly one case right now: on 32-bit architectures (which are the only architectures where call16 is defined), a stack-allocated defer of a function with a 16-byte or smaller argument frame including a non-empty result area can corrupt memory if the deferred function grows the stack and is invoked during a panic. Whew. All other current uses of reflectcall pass a heap-allocated "arg" frame (which happens to be reachable from other stack roots, so tracing isn't a problem). Curiously, in 2016, the signatures of all call* functions were wrong in exactly this way. CL 31654 fixed all of them in stubs.go, but missed the one in stubs32.go. Updates #41795. Fixes #41797. Change-Id: I31e3c0df201f79ee5707eeb8dc4ff0d13fc10ada Reviewed-on: https://go-review.googlesource.com/c/go/+/259338 Trust: Austin Clements Run-TryBot: Austin Clements TryBot-Result: Go Bot Reviewed-by: Cherry Zhang Reviewed-on: https://go-review.googlesource.com/c/go/+/259598 --- src/runtime/stubs32.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runtime/stubs32.go b/src/runtime/stubs32.go index a7f52f6b9e..c4715fe989 100644 --- a/src/runtime/stubs32.go +++ b/src/runtime/stubs32.go @@ -11,4 +11,4 @@ import "unsafe" // Declarations for runtime services implemented in C or assembly that // are only present on 32 bit systems. -func call16(fn, arg unsafe.Pointer, n, retoffset uint32) +func call16(typ, fn, arg unsafe.Pointer, n, retoffset uint32) -- cgit v1.2.3-54-g00ecf From 7863f742c4e8427a7169325b024c2d8a970e0134 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 1 Oct 2020 09:57:52 -0700 Subject: [release-branch.go1.15] cmd/compile: fix left shift constant folding rule The 32-bit left shift constant folding rule should keep its result properly sign extended. 
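To make the difference concrete, here is a small standalone program
(illustrative only; c and d stand in for the rule's AuxInt values, and the
constant is the one built from "food" in the test case below):

	package main

	import "fmt"

	func main() {
		var c int64 = 0x646f6f66 // uint32 built from "food", as in the test
		var d uint64 = 8

		old := int64(int32(c)) << d // 0x646f6f6600: high bits are not a sign extension
		fix := int64(int32(c) << d) // 0x6f6f6600: shift within 32 bits, then extend

		fmt.Printf("old rule: %#x\nnew rule: %#x\n", old, fix)
	}

Only the second form yields the sign extension of a 32-bit result, which is
what the SSA checker expects in a MOVLconst's AuxInt.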
Fixes #41720 Fixes #41711 Change-Id: I0fc74444d444274e911952e1725dab0b7737a846 Reviewed-on: https://go-review.googlesource.com/c/go/+/258817 Trust: Keith Randall Run-TryBot: Keith Randall Reviewed-by: David Chase TryBot-Result: Go Bot --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 2 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 4 ++-- test/fixedbugs/issue41711.go | 17 +++++++++++++++++ 3 files changed, 20 insertions(+), 3 deletions(-) create mode 100644 test/fixedbugs/issue41711.go diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 9967c7b030..ee9ccfb41c 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1454,7 +1454,7 @@ (XORQ x x) -> (MOVQconst [0]) (XORL x x) -> (MOVLconst [0]) -(SHLLconst [d] (MOVLconst [c])) -> (MOVLconst [int64(int32(c)) << uint64(d)]) +(SHLLconst [d] (MOVLconst [c])) -> (MOVLconst [int64(int32(c) << uint64(d))]) (SHLQconst [d] (MOVQconst [c])) -> (MOVQconst [c << uint64(d)]) (SHLQconst [d] (MOVLconst [c])) -> (MOVQconst [int64(int32(c)) << uint64(d)]) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 20eab05e9c..72ed1eb62c 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -25454,7 +25454,7 @@ func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool { return true } // match: (SHLLconst [d] (MOVLconst [c])) - // result: (MOVLconst [int64(int32(c)) << uint64(d)]) + // result: (MOVLconst [int64(int32(c) << uint64(d))]) for { d := v.AuxInt if v_0.Op != OpAMD64MOVLconst { @@ -25462,7 +25462,7 @@ func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool { } c := v_0.AuxInt v.reset(OpAMD64MOVLconst) - v.AuxInt = int64(int32(c)) << uint64(d) + v.AuxInt = int64(int32(c) << uint64(d)) return true } return false diff --git a/test/fixedbugs/issue41711.go b/test/fixedbugs/issue41711.go new file mode 100644 index 0000000000..4e09440ba5 --- /dev/null +++ b/test/fixedbugs/issue41711.go @@ -0,0 +1,17 @@ +// compile -d=ssa/check/on + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func f() uint32 { + s := "food" + x := uint32(s[0]) + uint32(s[1])<<8 + uint32(s[2])<<16 + uint32(s[3])<<24 + // x is a constant, but that's not known until lowering. + // shifting it by 8 moves the high byte up into the high 32 bits of + // a 64-bit word. That word is not properly sign-extended by the faulty + // rule, which causes the compiler to fail. + return x << 8 +} -- cgit v1.2.3-54-g00ecf From 1984ee00048b63eacd2155cd6d74a2d13e998272 Mon Sep 17 00:00:00 2001 From: Alexander Rakoczy Date: Wed, 14 Oct 2020 13:45:23 -0400 Subject: [release-branch.go1.15] go1.15.3 Change-Id: I8a45870039d0d3f210d883c464a7fed2abd9e28b Reviewed-on: https://go-review.googlesource.com/c/go/+/262337 Run-TryBot: Alexander Rakoczy Reviewed-by: Dmitri Shuralyov TryBot-Result: Go Bot Trust: Alexander Rakoczy --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 6792f7e519..46b20cee5e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -go1.15.2 \ No newline at end of file +go1.15.3 \ No newline at end of file -- cgit v1.2.3-54-g00ecf