aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGerrit Code Review <noreply-gerritcodereview@google.com>2022-07-20 13:55:41 +0000
committerGerrit Code Review <noreply-gerritcodereview@google.com>2022-07-20 13:55:41 +0000
commitae43bdc3e3f87f8ba05ba12a17104ddbb0e6b30c (patch)
tree08509173f7fb9aa62ed113602e1bc48703c10daf
parent7a8ba83b729e37d0bdddc9a3c93ae866d1ef199a (diff)
parentde649a2a98b2775fad65a06cbbf641545a65a245 (diff)
downloadgo-ae43bdc3e3f87f8ba05ba12a17104ddbb0e6b30c.tar.gz
go-ae43bdc3e3f87f8ba05ba12a17104ddbb0e6b30c.zip
Merge "[dev.unified] all: merge master (8e1e64c) into dev.unified" into dev.unified
-rw-r--r--AUTHORS1
-rw-r--r--CONTRIBUTORS104
-rw-r--r--doc/go1.19.html9
-rw-r--r--misc/cgo/test/setgid2_linux.go5
-rw-r--r--misc/cgo/testcarchive/carchive_test.go20
-rw-r--r--misc/cgo/testcshared/cshared_test.go35
-rw-r--r--misc/cgo/testshared/shared_test.go9
-rw-r--r--src/cmd/compile/doc.go3
-rw-r--r--src/cmd/compile/internal/base/base.go1
-rw-r--r--src/cmd/compile/internal/base/flag.go19
-rw-r--r--src/cmd/compile/internal/ir/mini.go1
-rw-r--r--src/cmd/compile/internal/ir/mknode.go9
-rw-r--r--src/cmd/compile/internal/noder/stencil.go19
-rw-r--r--src/cmd/compile/internal/ssa/loopbce.go340
-rw-r--r--src/cmd/compile/internal/types/size.go2
-rw-r--r--src/cmd/compile/internal/types2/api.go15
-rw-r--r--src/cmd/compile/internal/types2/sizes.go23
-rw-r--r--src/cmd/compile/internal/types2/sizes_test.go41
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go13
-rw-r--r--src/cmd/compile/internal/walk/order.go8
-rw-r--r--src/cmd/dist/buildtool.go4
-rw-r--r--src/cmd/go/alldocs.go48
-rw-r--r--src/cmd/go/go_test.go24
-rw-r--r--src/cmd/go/internal/fsys/fsys.go89
-rw-r--r--src/cmd/go/internal/help/helpdoc.go13
-rw-r--r--src/cmd/go/internal/list/list.go27
-rw-r--r--src/cmd/go/internal/load/pkg.go9
-rw-r--r--src/cmd/go/internal/modcmd/download.go66
-rw-r--r--src/cmd/go/internal/modcmd/why.go2
-rw-r--r--src/cmd/go/internal/modfetch/cache.go88
-rw-r--r--src/cmd/go/internal/modfetch/codehost/codehost.go96
-rw-r--r--src/cmd/go/internal/modfetch/codehost/git.go151
-rw-r--r--src/cmd/go/internal/modfetch/codehost/git_test.go87
-rw-r--r--src/cmd/go/internal/modfetch/codehost/vcs.go43
-rw-r--r--src/cmd/go/internal/modfetch/coderepo.go88
-rw-r--r--src/cmd/go/internal/modfetch/coderepo_test.go12
-rw-r--r--src/cmd/go/internal/modfetch/proxy.go38
-rw-r--r--src/cmd/go/internal/modfetch/repo.go39
-rw-r--r--src/cmd/go/internal/modindex/index_test.go87
-rw-r--r--src/cmd/go/internal/modindex/read.go461
-rw-r--r--src/cmd/go/internal/modindex/scan.go2
-rw-r--r--src/cmd/go/internal/modindex/write.go46
-rw-r--r--src/cmd/go/internal/modinfo/info.go22
-rw-r--r--src/cmd/go/internal/modload/build.go69
-rw-r--r--src/cmd/go/internal/modload/buildlist.go5
-rw-r--r--src/cmd/go/internal/modload/edit.go2
-rw-r--r--src/cmd/go/internal/modload/list.go60
-rw-r--r--src/cmd/go/internal/modload/mvs.go15
-rw-r--r--src/cmd/go/internal/modload/query.go104
-rw-r--r--src/cmd/go/internal/modload/search.go13
-rw-r--r--src/cmd/go/internal/work/build.go9
-rw-r--r--src/cmd/go/internal/work/init.go6
-rw-r--r--src/cmd/go/script_test.go19
-rw-r--r--src/cmd/go/testdata/mod/example.com_retract_noupgrade_v1.0.0.txt9
-rw-r--r--src/cmd/go/testdata/script/README17
-rw-r--r--src/cmd/go/testdata/script/build_buildvcs_auto.txt4
-rw-r--r--src/cmd/go/testdata/script/build_overlay.txt20
-rw-r--r--src/cmd/go/testdata/script/fsys_walk.txt6
-rw-r--r--src/cmd/go/testdata/script/get_issue16471.txt22
-rw-r--r--src/cmd/go/testdata/script/index.txt6
-rw-r--r--src/cmd/go/testdata/script/list_perm.txt (renamed from src/cmd/go/testdata/script/list_permissions.txt)3
-rw-r--r--src/cmd/go/testdata/script/mod_download_issue51114.txt21
-rw-r--r--src/cmd/go/testdata/script/mod_download_private_vcs.txt10
-rw-r--r--src/cmd/go/testdata/script/mod_perm.txt23
-rw-r--r--src/cmd/go/testdata/script/mod_retract_noupgrade.txt11
-rw-r--r--src/cmd/go/testdata/script/reuse_git.txt425
-rw-r--r--src/cmd/go/testdata/script/test_fuzz_cache.txt11
-rw-r--r--src/cmd/go/testdata/script/work_goproxy_off.txt59
-rw-r--r--src/cmd/go/testdata/script/work_why_download_graph.txt10
-rw-r--r--src/cmd/internal/notsha256/sha256block_386.s3
-rw-r--r--src/cmd/internal/notsha256/sha256block_amd64.go3
-rw-r--r--src/cmd/internal/notsha256/sha256block_amd64.s3
-rw-r--r--src/cmd/internal/notsha256/sha256block_decl.go3
-rw-r--r--src/cmd/internal/notsha256/sha256block_generic.go4
-rw-r--r--src/cmd/internal/notsha256/sha256block_ppc64x.s3
-rw-r--r--src/cmd/link/internal/ld/lib.go13
-rw-r--r--src/cmd/nm/nm_test.go13
-rw-r--r--src/cmd/trace/main.go4
-rw-r--r--src/cmd/trace/trace.go8
-rw-r--r--src/compress/gzip/gunzip.go60
-rw-r--r--src/compress/gzip/gunzip_test.go16
-rw-r--r--src/crypto/x509/parser.go22
-rw-r--r--src/crypto/x509/x509.go11
-rw-r--r--src/crypto/x509/x509_test.go75
-rw-r--r--src/database/sql/sql_test.go45
-rw-r--r--src/encoding/gob/decode.go19
-rw-r--r--src/encoding/gob/gobencdec_test.go24
-rw-r--r--src/encoding/xml/read.go42
-rw-r--r--src/encoding/xml/read_test.go31
-rw-r--r--src/flag/flag.go3
-rw-r--r--src/go/build/build.go3
-rw-r--r--src/go/build/constraint/expr.go4
-rw-r--r--src/go/build/doc.go7
-rw-r--r--src/go/doc/comment/parse.go7
-rw-r--r--src/go/doc/comment/testdata/linklist.txt18
-rw-r--r--src/go/doc/comment/testdata/linklist2.txt39
-rw-r--r--src/go/doc/comment/testdata/linklist3.txt31
-rw-r--r--src/go/doc/comment/testdata/linklist4.txt36
-rw-r--r--src/go/parser/interface.go10
-rw-r--r--src/go/parser/parser.go54
-rw-r--r--src/go/parser/parser_test.go165
-rw-r--r--src/go/parser/resolver.go9
-rw-r--r--src/go/types/api.go15
-rw-r--r--src/go/types/sizes.go23
-rw-r--r--src/go/types/sizes_test.go41
-rw-r--r--src/go/types/testdata/fixedbugs/issue39634.go13
-rw-r--r--src/image/jpeg/reader_test.go23
-rw-r--r--src/internal/trace/goroutines.go37
-rw-r--r--src/io/fs/glob.go14
-rw-r--r--src/io/fs/glob_test.go10
-rw-r--r--src/net/http/fs.go1
-rw-r--r--src/net/http/fs_test.go54
-rw-r--r--src/net/http/header.go3
-rw-r--r--src/net/http/request.go4
-rw-r--r--src/net/http/server.go22
-rw-r--r--src/net/http/transport.go3
-rw-r--r--src/net/http/transport_test.go16
-rw-r--r--src/net/url/url.go2
-rw-r--r--src/net/url/url_test.go10
-rw-r--r--src/os/exec/exec.go4
-rw-r--r--src/os/path_windows.go2
-rw-r--r--src/path/filepath/match.go12
-rw-r--r--src/path/filepath/match_test.go10
-rw-r--r--src/runtime/mcache.go36
-rw-r--r--src/runtime/mgcpacer.go22
-rw-r--r--src/runtime/mprof.go2
-rw-r--r--src/runtime/os_plan9.go2
-rw-r--r--src/runtime/race/README6
-rw-r--r--src/runtime/race/race_darwin_amd64.sysobin538536 -> 541464 bytes
-rw-r--r--src/runtime/race/race_freebsd_amd64.sysobin710664 -> 712464 bytes
-rw-r--r--src/runtime/race/race_linux_amd64.sysobin552768 -> 557744 bytes
-rw-r--r--src/runtime/time.go12
-rw-r--r--src/runtime/traceback.go2
-rw-r--r--src/syscall/exec_linux.go4
-rw-r--r--test/fixedbugs/issue27938.go6
-rw-r--r--test/fixedbugs/issue53600.go11
-rw-r--r--test/fixedbugs/issue53600.out1
-rw-r--r--test/fixedbugs/issue53635.go31
-rw-r--r--test/fixedbugs/issue53653.go42
-rw-r--r--test/fixedbugs/issue53653.out8
-rw-r--r--test/loopbce.go65
-rw-r--r--test/run.go1
-rw-r--r--test/typeparam/issue53762.go18
143 files changed, 3559 insertions, 995 deletions
diff --git a/AUTHORS b/AUTHORS
index bb07bd52e8..0507e04838 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1438,6 +1438,7 @@ Wei Fu <fhfuwei@163.com>
Wei Guangjing <vcc.163@gmail.com>
Weichao Tang <tevic.tt@gmail.com>
Weixie Cui <cuiweixie@gmail.com> <523516579@qq.com>
+Weizhi Yan <yanweizhi@bytedance.com>
Wembley G. Leach, Jr <wembley.gl@gmail.com>
Wen Yang <yangwen.yw@gmail.com>
Will Faught <will.faught@gmail.com>
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index b0314a0ffb..45020d6c45 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -25,10 +25,12 @@
# Please keep the list sorted.
Aamir Khan <syst3m.w0rm@gmail.com>
+Aaqa Ishtyaq <aaqaishtyaq@gmail.com>
Aaron Beitch <aaronb@arista.com>
Aaron Bieber <deftly@gmail.com>
Aaron Cannon <cannona@fireantproductions.com>
Aaron France <aaron.l.france@gmail.com>
+Aaron Gable <aaron@letsencrypt.org>
Aaron Jacobs <jacobsa@google.com>
Aaron Jensen <jensen.aaro@gmail.com>
Aaron Kemp <kemp.aaron@gmail.com>
@@ -38,6 +40,7 @@ Aaron Stein <aaronstein12@gmail.com>
Aaron Torres <tcboox@gmail.com>
Aaron Zinman <aaron@azinman.com>
Aarti Parikh <aarti.parikh@gmail.com>
+Aayush Agrawal <aayushagrawal.1111@gmail.com>
Abdullah Al Maruf <mymail.maruf@gmail.com>
Abe Haskins <abeisgreat@abeisgreat.com>
Abhinav Gupta <abhinav.g90@gmail.com>
@@ -49,6 +52,7 @@ Adam Kisala <adam.kisala@gmail.com>
Adam Langley <agl@golang.org>
Adam Medzinski <adam.medzinski@gmail.com>
Adam Mitha <adam.mitha@gmail.com>
+Adam Pritchard <pritchard.adam@gmail.com>
Adam Shannon <adamkshannon@gmail.com>
Adam Shelton <aashelt90@gmail.com>
Adam Sindelar <adamsh@google.com>
@@ -98,8 +102,10 @@ Alberto Donizetti <alb.donizetti@gmail.com>
Alberto García Hierro <alberto@garciahierro.com> <alberto.garcia.hierro@gmail.com>
Alec Benzer <alec.benzer@gmail.com>
Alejandro García Montoro <alejandro.garciamontoro@gmail.com>
+Alejandro Sáez <asm@redhat.com>
Aleksa Sarai <cyphar@cyphar.com>
Aleksandar Dezelin <dezelin@gmail.com>
+Aleksandr Dobkin <dobkin@google.com>
Aleksandr Lukinykh <a.lukinykh@xsolla.com>
Aleksandr Razumov <ar@cydev.ru>
Alekseev Artem <a.artem060@gmail.com>
@@ -107,6 +113,7 @@ Aleksei Tirman <aleksei.tirman@jetbrains.com>
Alessandro Arzilli <alessandro.arzilli@gmail.com>
Alessandro Baffa <alessandro.baffa@gmail.com>
Alex A Skinner <alex@lx.lc>
+Alex Brachet <abrachet@google.com>
Alex Brainman <alex.brainman@gmail.com>
Alex Bramley <abramley@google.com>
Alex Browne <stephenalexbrowne@gmail.com>
@@ -165,6 +172,7 @@ Alexey Borzenkov <snaury@gmail.com>
Alexey Naidonov <alexey.naidyonov@gmail.com>
Alexey Neganov <neganovalexey@gmail.com>
Alexey Palazhchenko <alexey.palazhchenko@gmail.com>
+Alexey Perevalov <alexej.perevalov@gmail.com>
Alexey Semenyuk <alexsemenyuk88@gmail.com>
Alexey Vilenskiy <bynovhack@gmail.com>
Alexis Hildebrandt <surryhill@gmail.com>
@@ -275,6 +283,7 @@ Anthony Alves <cvballa3g0@gmail.com>
Anthony Canino <anthony.canino1@gmail.com>
Anthony Eufemio <anthony.eufemio@gmail.com>
Anthony Fok <foka@debian.org>
+Anthony Hamon <antham@users.noreply.github.com>
Anthony Martin <ality@pbrane.org>
Anthony Sottile <asottile@umich.edu>
Anthony Starks <ajstarks@gmail.com>
@@ -335,6 +344,7 @@ Avi Flax <avi@timehop.com>
Aviv Klasquin Komissar <avivklas@gmail.com>
awaw fumin <awawfumin@gmail.com>
Awn Umar <awn@cryptolosophy.io>
+Axel Busch <94176305+abuschIBM@users.noreply.github.com>
Axel Wagner <axel.wagner.hh@googlemail.com>
Ayan George <ayan@ayan.net>
Ayanamist Yang <ayanamist@gmail.com>
@@ -364,6 +374,7 @@ Ben Laurie <ben@links.org> <benl@google.com>
Ben Lubar <ben.lubar@gmail.com>
Ben Lynn <benlynn@gmail.com>
Ben Olive <sionide21@gmail.com>
+Ben Sarah Golightly <golightly.ben@googlemail.com>
Ben Schwartz <bemasc@google.com>
Ben Shi <powerman1st@163.com> <ben.shi@streamcomputing.com>
Ben Toews <mastahyeti@gmail.com>
@@ -379,6 +390,7 @@ Benny Siegert <bsiegert@gmail.com>
Benoit Sigoure <tsunanet@gmail.com>
Berengar Lehr <Berengar.Lehr@gmx.de>
Berkant Ipek <41230766+0xbkt@users.noreply.github.com>
+Bernhard Valenti <bernhard.valenti@gmail.com>
Beth Brown <ecbrown@google.com>
Bharath Kumar Uppala <uppala.bharath@gmail.com>
Bharath Thiruveedula <tbharath91@gmail.com>
@@ -429,6 +441,7 @@ Brave Cow <rsr715@gmail.com>
Brayden Cloud <bcloud@google.com>
Brendan Daniel Tracey <tracey.brendan@gmail.com>
Brendan O'Dea <bod@golang.org>
+Breno Andrade <breno.andrade.dev@gmail.com>
Brett Cannon <bcannon@gmail.com>
Brett Merrill <brett.j.merrill94@gmail.com>
Brian Dellisanti <briandellisanti@gmail.com>
@@ -498,6 +511,7 @@ Charles Lee <zombie.fml@gmail.com>
Charles Weill <weill@google.com>
Charlie Getzen <charlie@bolt.com>
Charlie Moog <moogcharlie@gmail.com>
+Charlie Vieth <charlie.vieth@gmail.com>
Charlotte Brandhorst-Satzkorn <catzkorn@gmail.com>
Chauncy Cullitan <chauncyc@google.com>
Chen Zhidong <njutczd@gmail.com>
@@ -540,6 +554,7 @@ Christian Himpel <chressie@googlemail.com> <chressie@gmail.com>
Christian Muehlhaeuser <muesli@gmail.com>
Christian Pellegrin <chri@evolware.org>
Christian R. Petrin <christianpetrin@gmail.com>
+Christian Stewart <christian@paral.in>
Christian Svensson <blue@cmd.nu>
Christine Hansmann <chhansmann@gmail.com>
Christoffer Buchholz <christoffer.buchholz@gmail.com>
@@ -580,7 +595,9 @@ Corne van der Plas <vdplas@gmail.com>
Cosmos Nicolaou <cnicolaou@google.com>
Costin Chirvasuta <ctin@google.com>
Craig Citro <craigcitro@google.com>
+Cristian Greco <sacrogemini@gmail.com>
Cristian Staretu <unclejacksons@gmail.com>
+Cristiano Vitorino <57003922+cristianovitorino@users.noreply.github.com>
Cristo García <cgg.code@gmail.com>
cui fliter <imcusg@gmail.com>
Cuihtlauac ALVARADO <cuihtlauac.alvarado@orange.com>
@@ -613,6 +630,7 @@ Daniel Cormier <danielc@knowbe4.com>
Daniël de Kok <me@danieldk.eu>
Daniel Fleischman <danielfleischman@gmail.com>
Daniel Ingram <ingramds@appstate.edu>
+Daniel Jakots <danieljakots@gmail.com>
Daniel Johansson <dajo2002@gmail.com>
Daniel Kerwin <d.kerwin@gini.net>
Daniel Kessler <dkess@google.com>
@@ -688,6 +706,7 @@ David R. Jenni <david.r.jenni@gmail.com>
David Sansome <me@davidsansome.com>
David Stainton <dstainton415@gmail.com>
David Symonds <dsymonds@golang.org>
+David Taylor <tinystatemachine@gmail.com>
David Thomas <davidthomas426@gmail.com>
David Timm <dtimm@pivotal.io>
David Titarenco <david.titarenco@gmail.com>
@@ -695,6 +714,7 @@ David Tolpin <david.tolpin@gmail.com>
David Url <david@urld.io>
David Volquartz Lebech <david@lebech.info>
David Wimmer <davidlwimmer@gmail.com>
+Davide Masserut <d.masserut@gmail.com>
Davies Liu <davies.liu@gmail.com>
Davor Kapsa <davor.kapsa@gmail.com>
Dean Eigenmann <7621705+decanus@users.noreply.github.com>
@@ -762,6 +782,7 @@ Donovan Hide <donovanhide@gmail.com>
Doug Anderson <douga@google.com>
Doug Fawley <dfawley@google.com>
Douglas Danger Manley <doug.manley@gmail.com>
+Dragan Mladjenovic <Dragan.Mladjenovic@syrmia.com>
Drew Flower <drewvanstone@gmail.com>
Drew Hintz <adhintz@google.com>
Drew Richardson <drewrichardson@gmail.com>
@@ -774,6 +795,7 @@ Dustin Sallings <dsallings@gmail.com>
Dustin Shields-Cloues <dcloues@gmail.com>
Dustin Spicuzza <dustin.spicuzza@gmail.com>
Dvir Volk <dvir@everything.me> <dvirsky@gmail.com>
+Dylan Le <dungtuanle@google.com>
Dylan Waits <dylan@waits.io>
Ed Schouten <ed@nuxi.nl>
Edan Bedrik <3d4nb3@gmail.com>
@@ -785,6 +807,7 @@ Eduardo Villaseñor <evillasrmx@gmail.com>
Edward Muller <edwardam@interlix.com>
Egon Elbre <egonelbre@gmail.com>
Ehren Kret <ehren.kret@gmail.com>
+Eisuke Takahashi <eisuke.takahashi.home@gmail.com>
Eitan Adler <lists@eitanadler.com>
Eivind Uggedal <eivind@uggedal.com>
El Mostafa Idrassi <el.mostafa.idrassi@gmail.com>
@@ -882,6 +905,7 @@ Fernandez Ludovic <lfernandez.dev@gmail.com>
Filip Gruszczyński <gruszczy@gmail.com>
Filip Haglund <drathier@users.noreply.github.com>
Filip Stanis <fstanis@google.com>
+Filippo Rossi <filipporossi@hey.com>
Filippo Valsorda <filippo@golang.org> <filippo@cloudflare.com> <hi@filippo.io>
Firmansyah Adiputra <frm.adiputra@gmail.com>
Florian Forster <octo@google.com>
@@ -965,19 +989,24 @@ GitHub User @ajnirp (1688456) <ajnirp@users.noreply.github.com>
GitHub User @ajz01 (4744634) <ajzdenek@gmail.com>
GitHub User @alkesh26 (1019076) <alkesh26@gmail.com>
GitHub User @andig (184815) <cpuidle@gmx.de>
+GitHub User @AndreasHGK (36563463) <andreaselbergs@outlook.com>
GitHub User @andrius4669 (4699695) <andrius4669@gmail.com>
+GitHub User @ardnew (3837367) <andrew@ardnew.com>
+GitHub User @ariathaker (51683211) <ariathaker@gmail.com>
GitHub User @as (8127015) <as.utf8@gmail.com>
GitHub User @bakape (7851952) <bakape@gmail.com>
GitHub User @bgadrian (830001) <aditza8@gmail.com>
GitHub User @bontequero (2674999) <bontequero@gmail.com>
GitHub User @cch123 (384546) <buaa.cch@gmail.com>
GitHub User @chainhelen (7046329) <chainhelen@gmail.com>
+GitHub User @champly (15027259) <champly1993@gmail.com>
GitHub User @chanxuehong (3416908) <chanxuehong@gmail.com>
GitHub User @Cluas (10056928) <Cluas@live.cn>
GitHub User @cncal (23520240) <flycalvin@qq.com>
GitHub User @DQNEO (188741) <dqneoo@gmail.com>
GitHub User @Dreamacro (8615343) <chuainian@gmail.com>
GitHub User @dupoxy (1143957) <dupoxy@users.noreply.github.com>
+GitHub User @eh-steve (16373174) <eh.steve.99@gmail.com>
GitHub User @EndlessCheng (7086966) <loli.con@qq.com>
GitHub User @erifan (31343225) <eric.fang@arm.com>
GitHub User @esell (9735165) <eujon.sellers@gmail.com>
@@ -987,6 +1016,7 @@ GitHub User @geedchin (11672310) <geedchin@gmail.com>
GitHub User @GrigoriyMikhalkin (3637857) <grigoriymikhalkin@gmail.com>
GitHub User @Gusted (25481501) <williamzijl7@hotmail.com>
GitHub User @hengwu0 (41297446) <41297446+hengwu0@users.noreply.github.com>
+GitHub User @hidu (613972) <duv123@gmail.com>
GitHub User @hitzhangjie (3725760) <hit.zhangjie@gmail.com>
GitHub User @hkhere (33268704) <33268704+hkhere@users.noreply.github.com>
GitHub User @hqpko (13887251) <whaibin01@hotmail.com>
@@ -994,8 +1024,10 @@ GitHub User @Illirgway (5428603) <illirgway@gmail.com>
GitHub User @itchyny (375258) <itchyny@hatena.ne.jp>
GitHub User @jinmiaoluo (39730824) <jinmiaoluo@icloud.com>
GitHub User @jopbrown (6345470) <msshane2008@gmail.com>
+GitHub User @Jorropo (24391983) <jorropo.pgm@gmail.com>
GitHub User @kazyshr (30496953) <kazyshr0301@gmail.com>
GitHub User @kc1212 (1093806) <kc1212@users.noreply.github.com>
+GitHub User @kkHAIKE (64772) <kkhaike@gmail.com>
GitHub User @komisan19 (18901496) <komiyama6219@gmail.com>
GitHub User @korzhao (64203902) <korzhao95@gmail.com>
GitHub User @Kropekk (13366453) <kamilkropiewnicki@gmail.com>
@@ -1012,8 +1044,11 @@ GitHub User @markruler (38225900) <csu0414@gmail.com>
GitHub User @Matts966 (28551465) <Matts966@users.noreply.github.com>
GitHub User @micnncim (21333876) <micnncim@gmail.com>
GitHub User @mkishere (224617) <224617+mkishere@users.noreply.github.com>
+GitHub User @mkontani (19817636) <itoama@live.jp>
+GitHub User @mstmdev (5756338) <mstmdev@gmail.com>
GitHub User @nu50218 (40682920) <nu_ll@icloud.com>
GitHub User @OlgaVlPetrova (44112727) <OVPpetrova@gmail.com>
+GitHub User @penglei (1140325) <penglei@ybyte.org>
GitHub User @pierwill (19642016) <pierwill@users.noreply.github.com>
GitHub User @pityonline (438222) <pityonline@gmail.com>
GitHub User @po3rin (29445112) <abctail30@gmail.com>
@@ -1027,6 +1062,7 @@ GitHub User @roudkerk (52280478) <roudkerk@google.com>
GitHub User @saitarunreddy (21041941) <saitarunreddypalla@gmail.com>
GitHub User @SataQiu (9354727) <shidaqiu2018@gmail.com>
GitHub User @seifchen (23326132) <chenxuefeng1207@gmail.com>
+GitHub User @sethbattin (1627760) <seth.battin@gmail.com>
GitHub User @shogo-ma (9860598) <Choroma194@gmail.com>
GitHub User @sivchari (55221074) <shibuuuu5@gmail.com>
GitHub User @skanehira (7888591) <sho19921005@gmail.com>
@@ -1037,6 +1073,7 @@ GitHub User @tangxi666 (48145175) <tx1275044634@gmail.com>
GitHub User @tatsumack (4510569) <tatsu.mack@gmail.com>
GitHub User @tell-k (26263) <ffk2005@gmail.com>
GitHub User @tennashi (10219626) <tennashio@gmail.com>
+GitHub User @thepudds (20628140) <thepudds@users.noreply.github.com>
GitHub User @uhei (2116845) <uhei@users.noreply.github.com>
GitHub User @uji (49834542) <ujiprog@gmail.com>
GitHub User @unbyte (5772358) <i@shangyes.net>
@@ -1048,6 +1085,7 @@ GitHub User @wolf1996 (5901874) <ksgiv37@gmail.com>
GitHub User @yah01 (12216890) <kagaminehuan@gmail.com>
GitHub User @yuanhh (1298735) <yuan415030@gmail.com>
GitHub User @zikaeroh (48577114) <zikaeroh@gmail.com>
+GitHub User @zlasd (9432027) <zlasd@hotmail.com>
GitHub User @ZZMarquis (7624583) <zhonglingjian3821@163.com>
Giulio Iotti <dullgiulio@gmail.com>
Giulio Micheloni <giulio.micheloni@gmail.com>
@@ -1067,6 +1105,7 @@ Greg Steuck <gnezdo+github@google.com>
Greg Thelen <gthelen@google.com>
Greg Ward <greg@gerg.ca>
Grégoire Delattre <gregoire.delattre@gmail.com>
+Grégoire Détrez <gregoire@fripost.org>
Gregory Man <man.gregory@gmail.com>
Gregory Petrosyan <gregory.petrosyan@gmail.com>
Guilherme Caruso <gui.martinscaruso@gmail.com>
@@ -1078,6 +1117,7 @@ Guillaume Blaquiere <guillaume.blaquiere@gmail.com>
Guillaume J. Charmes <guillaume@charmes.net>
Guillaume Sottas <guillaumesottas@gmail.com>
Günther Noack <gnoack@google.com>
+Guo Hui <gh73962@gmail.com>
Guobiao Mei <meiguobiao@gmail.com>
Guodong Li <guodongli@google.com>
Guoliang Wang <iamwgliang@gmail.com>
@@ -1128,6 +1168,7 @@ Herbert Georg Fischer <herbert.fischer@gmail.com>
Herbie Ong <herbie@google.com>
Heschi Kreinick <heschi@google.com>
Hidetatsu Yaginuma <ygnmhdtt@gmail.com>
+Hilário Coelho <hilario.coelho@securityside.com>
Hilko Bengen <bengen@hilluzination.de>
Himanshu Kishna Srivastava <28himanshu@gmail.com>
Hiroaki Nakamura <hnakamur@gmail.com>
@@ -1173,8 +1214,10 @@ Igor Dolzhikov <bluesriverz@gmail.com>
Igor Vashyst <ivashyst@gmail.com>
Igor Zhilianin <igor.zhilianin@gmail.com>
Ikko Ashimine <eltociear@gmail.com>
+Ilia Choly <ilia.choly@gmail.com>
Illya Yalovyy <yalovoy@gmail.com>
Ilya Chukov <56119080+Elias506@users.noreply.github.com>
+Ilya Leoshkevich <iii@linux.ibm.com>
Ilya Mateyko <me@astrophena.name>
Ilya Sinelnikov <sidhmangh@gmail.com>
Ilya Tocar <ilya.tocar@intel.com>
@@ -1274,6 +1317,7 @@ Jani Monoses <jani.monoses@ubuntu.com> <jani.monoses@gmail.com>
Jannis Andrija Schnitzer <jannis@schnitzer.im>
Jared Allard <jaredallard@users.noreply.github.com>
Jared Culp <jculp14@gmail.com>
+Jared Horvat <horvski@gmail.com>
Jaroslavas Počepko <jp@webmaster.ms>
Jason A. Donenfeld <Jason@zx2c4.com>
Jason Baker <jason-baker@users.noreply.github.com>
@@ -1309,6 +1353,7 @@ Jeevanandam M <jeeva@myjeeva.com>
Jeff (Zhefu) Jiang <jeffjiang@google.com>
Jeff Craig <jeffcraig@google.com>
Jeff Dupont <jeff.dupont@gmail.com>
+Jeff Grafton <jgrafton@google.com>
Jeff Hodges <jeff@somethingsimilar.com>
Jeff Johnson <jrjohnson@google.com>
Jeff R. Allen <jra@nella.org> <jeff.allen@gmail.com>
@@ -1322,6 +1367,7 @@ Jens Frederich <jfrederich@gmail.com>
Jeremiah Harmsen <jeremiah@google.com>
Jeremy Banks <_@jeremy.ca>
Jeremy Canady <jcanady@gmail.com>
+Jeremy Chase <jeremy.chase@gmail.com>
Jeremy Faller <jeremy@golang.org>
Jeremy Jackins <jeremyjackins@gmail.com>
Jeremy Jay <jeremy@pbnjay.com>
@@ -1352,14 +1398,16 @@ Jimmy Zelinskie <jimmyzelinskie@gmail.com>
Jin-wook Jeong <jeweljar@hanmail.net>
Jingcheng Zhang <diogin@gmail.com>
Jingguo Yao <yaojingguo@gmail.com>
-Jinzhu Zhang <wosmvp@gmail.com>
Jingnan Si <jingnan.si@gmail.com>
Jinkun Zhang <franksnolf@gmail.com>
Jinwen Wo <wojinwen@huawei.com>
+Jinwook Jeong <vustthat@gmail.com>
+Jinzhu Zhang <wosmvp@gmail.com>
Jiong Du <londevil@gmail.com>
Jirka Daněk <dnk@mail.muni.cz>
Jiulong Wang <jiulongw@gmail.com>
Joakim Sernbrant <serbaut@gmail.com>
+João Penteado <4219131+joaopenteado@users.noreply.github.com>
Jochen Weber <jochen.weber80@gmail.com>
Joe Bowbeer <joe.bowbeer@gmail.com>
Joe Cortopassi <joe@joecortopassi.com>
@@ -1383,8 +1431,10 @@ Johan Euphrosine <proppy@google.com>
Johan Jansson <johan.jansson@iki.fi>
Johan Knutzen <johan@senri.se>
Johan Sageryd <j@1616.se>
+Johan Van de Wauw <johan@gisky.be>
Johannes Altmanninger <aclopte@gmail.com>
Johannes Huning <johannes.huning@gmail.com>
+John Anthony <johnanthony.contrib@gmail.com>
John Asmuth <jasmuth@gmail.com>
John Bampton <jbampton@gmail.com>
John Beisley <huin@google.com>
@@ -1458,6 +1508,8 @@ Josh Deprez <josh.deprez@gmail.com>
Josh Goebel <dreamer3@gmail.com>
Josh Hoak <jhoak@google.com>
Josh Holland <jrh@joshh.co.uk>
+Josh Humphries <jh@fullstory.com>
+Josh Powers <jpowers@influxdata.com>
Josh Rickmar <jrick@companyzero.com>
Josh Roppo <joshroppo@gmail.com>
Josh Varga <josh.varga@gmail.com>
@@ -1564,11 +1616,13 @@ Kevin Klues <klueska@gmail.com> <klueska@google.com>
Kevin Malachowski <chowski@google.com>
Kevin Parsons <kevpar@microsoft.com>
Kevin Ruffin <kruffin@gmail.com>
+Kevin Smith <ksmith.nop@gmail.com>
Kevin Vu <kevin.m.vu@gmail.com>
Kevin Zita <bleedgreenandgold@gmail.com>
Keyan Pishdadian <kpishdadian@gmail.com>
Keyuan Li <keyuanli123@gmail.com>
Kezhu Wang <kezhuw@gmail.com>
+Khaled Yakdan <yakdan@code-intelligence.com>
Khosrow Moossavi <khos2ow@gmail.com>
Kieran Colford <kieran@kcolford.com>
Kieran Gorman <kieran.j.gorman@gmail.com>
@@ -1590,6 +1644,7 @@ Koki Tomoshige <tomocy.dev@gmail.com>
Komu Wairagu <komuw05@gmail.com>
Konstantin <konstantin8105@gmail.com>
Konstantin Shaposhnikov <k.shaposhnikov@gmail.com>
+Koumei Mikuni <komata392@gmail.com>
Koya IWAMURA <kiwamura0314@gmail.com>
Kris Kwiatkowski <kris@cloudflare.com>
Kris Nova <kris@nivenly.com>
@@ -1625,6 +1680,7 @@ Lars Jeppesen <jeppesen.lars@gmail.com>
Lars Lehtonen <lars.lehtonen@gmail.com>
Lars Wiegman <lars@namsral.com>
Larz Conwell <larzconwell@gmail.com>
+Lasse Folger <lassefolger@google.com>
Laurent Voisin <lpvoisin@gmail.com>
Laurie Clark-Michalek <laurie@qubit.com>
LE Manh Cuong <cuong.manhle.vn@gmail.com>
@@ -1656,6 +1712,7 @@ Lorenz Nickel <mail@lorenznickel.de>
Lorenzo Masini <rugginoso@develer.com>
Lorenzo Stoakes <lstoakes@gmail.com>
Louis Kruger <louisk@google.com>
+Louis Portay <louisportay@gmail.com>
Luan Santos <cfcluan@gmail.com>
Lubomir I. Ivanov <neolit123@gmail.com>
Luca Bruno <luca.bruno@coreos.com>
@@ -1670,6 +1727,7 @@ Luigi Riefolo <luigi.riefolo@gmail.com>
Luit van Drongelen <luitvd@gmail.com>
Luka Zakrajšek <tr00.g33k@gmail.com>
Luka Zitnik <luka.zitnik@gmail.com>
+Lukas Joisten <luckuck.f95@gmail.com>
Lukasz Milewski <lmmilewski@gmail.com>
Luke Champine <luke.champine@gmail.com>
Luke Curley <qpingu@gmail.com>
@@ -1688,6 +1746,7 @@ Magnus Hiie <magnus.hiie@gmail.com>
Mahdi Hosseini Moghaddam <seyed.mahdi.hosseini.moghaddam@ibm.com>
Maia Lee <maia.lee@leftfieldlabs.com>
Maicon Costa <maiconscosta@gmail.com>
+Maisem Ali <maisem@tailscale.com>
Mak Kolybabi <mak@kolybabi.com>
Maksym Trykur <maksym.trykur@gmail.com>
Mal Curtis <mal@mal.co.nz>
@@ -1779,6 +1838,7 @@ Matheus Alcantara <matheusssilv97@gmail.com>
Mathias Beke <git@denbeke.be>
Mathias Hall-Andersen <mathias@hall-andersen.dk>
Mathias Leppich <mleppich@muhqu.de>
+Mathieu Aubin <mathieu@zeroserieux.com>
Mathieu Lonjaret <mathieu.lonjaret@gmail.com>
Mats Lidell <mats.lidell@cag.se> <mats.lidell@gmail.com>
Matt Aimonetti <mattaimonetti@gmail.com>
@@ -1795,6 +1855,7 @@ Matt Juran <thepciet@gmail.com>
Matt Layher <mdlayher@gmail.com> <mdlayher@planetscale.com>
Matt Masurka <masurka@google.com>
Matt Pearring <broskies@google.com>
+Matt Prahl <mprahl@redhat.com>
Matt Reiferson <mreiferson@gmail.com>
Matt Robenolt <matt@ydekproductions.com>
Matt Strong <mstrong1341@gmail.com>
@@ -1826,11 +1887,14 @@ Maxim Pimenov <mpimenov@google.com>
Maxim Pugachev <pugachev.mm@gmail.com>
Maxim Ushakov <ushakov@google.com>
Maxime de Roucy <maxime.deroucy@gmail.com>
+Maxime Soulé <zeptomax@gmail.com>
+Maxime Veber <nek.dev@gmail.com>
Máximo Cuadros Ortiz <mcuadros@gmail.com>
Maxwell Krohn <themax@gmail.com>
Maya Rashish <maya@NetBSD.org>
Mayank Kumar <krmayankk@gmail.com>
Mehrad Sadeghi <2012.linkinpark@gmail.com>
+Meidan Li <limeidan@loongson.cn>
Meir Fischer <meirfischer@gmail.com>
Meng Zhuo <mengzhuo1203@gmail.com> <mzh@golangcn.org>
Mhd Sulhan <m.shulhan@gmail.com>
@@ -1848,6 +1912,7 @@ Michael Ellis <micellis@justin.tv>
Michael Fraenkel <michael.fraenkel@gmail.com>
Michael Fromberger <michael.j.fromberger@gmail.com>
Michael Gehring <mg@ebfe.org> <gnirheg.leahcim@gmail.com>
+Michael Gross <info@komika.org>
Michael Henderson <mdhender@users.noreply.github.com>
Michael Hendricks <michael@ndrix.org>
Michael Hoisie <hoisie@gmail.com>
@@ -1909,6 +1974,7 @@ Mike Houston <mike@kothar.net>
Mike Kabischev <kabischev@gmail.com>
Mike Rosset <mike.rosset@gmail.com>
Mike Samuel <mikesamuel@gmail.com>
+Mike Seplowitz <mseplowitz@bloomberg.net>
Mike Solomon <msolo@gmail.com>
Mike Strosaker <strosake@us.ibm.com>
Mike Tsao <mike@sowbug.com>
@@ -1939,9 +2005,11 @@ Monty Taylor <mordred@inaugust.com>
Moritz Fain <moritz@fain.io>
Moriyoshi Koizumi <mozo@mozo.jp>
Morten Siebuhr <sbhr@sbhr.dk>
+Moshe Good <moshe@squareup.com>
Môshe van der Sterre <moshevds@gmail.com>
Mostafa Solati <mostafa.solati@gmail.com>
Mostyn Bramley-Moore <mostyn@antipode.se>
+Motiejus Jakštys <motiejus@jakstys.lt>
Mrunal Patel <mrunalp@gmail.com>
Muhammad Falak R Wani <falakreyaz@gmail.com>
Muhammad Hamza Farrukh <hamzafarrukh141@gmail.com>
@@ -2001,6 +2069,7 @@ Nick Robinson <nrobinson13@gmail.com>
Nick Sherron <nsherron90@gmail.com>
Nick Smolin <nick27surgut@gmail.com>
Nicolas BRULEZ <n.brulez@gmail.com>
+Nicolas Hillegeer <aktau@google.com>
Nicolas Kaiser <nikai@nikai.net>
Nicolas Owens <mischief@offblast.org>
Nicolas S. Dade <nic.dade@gmail.com>
@@ -2049,6 +2118,7 @@ Olivier Duperray <duperray.olivier@gmail.com>
Olivier Mengué <olivier.mengue@gmail.com>
Olivier Poitrey <rs@dailymotion.com>
Olivier Saingre <osaingre@gmail.com>
+Olivier Szika <olivier.szika@vadesecure.com>
Olivier Wulveryck <olivier.wulveryck@gmail.com>
Omar Jarjur <ojarjur@google.com>
Onkar Jadhav <omjadhav2610@gmail.com>
@@ -2069,6 +2139,7 @@ Panos Georgiadis <pgeorgiadis@suse.de>
Pantelis Sampaziotis <psampaz@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Paolo Martini <mrtnpaolo@gmail.com>
+Park Zhou <buildpaas@gmail.com>
Parker Moore <parkrmoore@gmail.com>
Parminder Singh <parmsingh101@gmail.com>
Pascal Dierich <pascal@pascaldierich.com>
@@ -2086,6 +2157,7 @@ Patrick Lee <pattyshack101@gmail.com>
Patrick Mézard <patrick@mezard.eu>
Patrick Mylund Nielsen <patrick@patrickmn.com>
Patrick Pelletier <pp.pelletier@gmail.com>
+Patrick Pokatilo <mail@shyxormz.net>
Patrick Riley <pfr@google.com>
Patrick Smith <pat42smith@gmail.com>
Patrik Lundin <patrik@sigterm.se>
@@ -2118,6 +2190,7 @@ Paul Wankadia <junyer@google.com>
Paulo Casaretto <pcasaretto@gmail.com>
Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
Paulo Gomes <paulo.gomes.uk@gmail.com>
+Pavel Gryaznov <grbitt@gmail.com>
Pavel Kositsyn <kositsyn.pa@phystech.edu>
Pavel Paulau <pavel.paulau@gmail.com>
Pavel Watson <watsonpavel@gmail.com>
@@ -2154,6 +2227,7 @@ Péter Szabó <pts@google.com>
Péter Szilágyi <peterke@gmail.com>
Peter Teichman <pteichman@fastly.com>
Peter Tseng <ptseng@squareup.com>
+Peter Verraedt <peter@verraedt.be>
Peter Waldschmidt <peter@waldschmidt.com>
Peter Waller <peter.waller@gmail.com>
Peter Weinberger <pjw@golang.org>
@@ -2162,6 +2236,8 @@ Peter Wu <pwu@cloudflare.com>
Peter Zhang <i@ddatsh.com>
Petr Jediný <petr.jediny@gmail.com>
Petrica Voicu <pvoicu@paypal.com>
+Phil Bracikowski <pbracikowski@influxdata.com>
+Phil Kulin <schors@gmail.com>
Phil Pearl <philip.j.r.pearl@gmail.com>
Phil Pennock <pdp@golang.org>
Philip Børgesen <philip.borgesen@gmail.com>
@@ -2171,6 +2247,7 @@ Philip K. Warren <pkwarren@gmail.com>
Philip Nelson <me@pnelson.ca>
Philipp Sauter <sauterp@protonmail.com>
Philipp Stephani <phst@google.com>
+Philippe Antoine <contact@catenacyber.fr>
Phillip Campbell <15082+phillc@users.noreply.github.com>
Pierre Carru <pierre.carru@eshard.com>
Pierre Durand <pierredurand@gmail.com>
@@ -2184,6 +2261,7 @@ Plekhanov Maxim <kishtatix@gmail.com>
Poh Zi How <poh.zihow@gmail.com>
Polina Osadcha <polliosa@google.com>
Pontus Leitzler <leitzler@gmail.com>
+Pooja Shyamsundar <poojashyam@ibm.com>
Povilas Versockas <p.versockas@gmail.com>
Prajwal Koirala <16564273+Prajwal-Koirala@users.noreply.github.com>
Prasanga Siripala <pj@pjebs.com.au>
@@ -2235,7 +2313,9 @@ Rebecca Stambler <rstambler@golang.org>
Reilly Watson <reillywatson@gmail.com>
Reinaldo de Souza Jr <juniorz@gmail.com>
Remi Gillig <remigillig@gmail.com>
+Remy Chantenay <remy.chantenay@gmail.com>
Rémy Oudompheng <oudomphe@phare.normalesup.org> <remyoudompheng@gmail.com>
+Ren Kanai <rk2904powr@gmail.com>
Ren Ogaki <re.yuz77777@gmail.com>
Rens Rikkerink <Ikkerens@users.noreply.github.com>
Rhys Hiltner <rhys@justin.tv>
@@ -2301,8 +2381,10 @@ Romain Baugue <romain.baugue@elwinar.com>
Roman Budnikov <romanyx90@yandex.ru>
Roman Kollár <roman.kollar.0@gmail.com>
Roman Shchekin <mrqtros@gmail.com>
+Romanos Skiadas <rom.skiad@gmail.com>
Ron Hashimoto <mail@h2so5.net>
Ron Minnich <rminnich@gmail.com>
+Ronaldo Lanhellas <ronaldo.lanhellas@gmail.com>
Ronnie Ebrin <ebrin.ronnie@protonmail.com>
Ross Chater <rdchater@gmail.com>
Ross Kinsey <rossikinsey@gmail.com>
@@ -2341,6 +2423,7 @@ Sabin Mihai Rapan <sabin.rapan@gmail.com>
Sad Pencil <qh06@qq.com>
Sai Cheemalapati <saicheems@google.com>
Sai Kiran Dasika <kirandasika30@gmail.com>
+Sai Sunder <saisunder92@gmail.com>
Sakeven Jiang <jc5930@sina.cn>
Salaheddin M. Mahmud <salah.mahmud@gmail.com>
Salmān Aljammāz <s@0x65.net>
@@ -2392,6 +2475,7 @@ Sean Liao <seankhliao@gmail.com>
Sean Rees <sean@erifax.org>
Sebastiaan van Stijn <github@gone.nl>
Sebastian Chlopecki <sebsebmc@gmail.com>
+Sebastian Gassner <sepastian@users.noreply.github.com>
Sebastian Kinne <skinne@google.com>
Sebastian Schmidt <yath@google.com>
Sebastien Binet <seb.binet@gmail.com>
@@ -2428,6 +2512,7 @@ Shamim Akhtar <shamim.rhce@gmail.com>
Shane Hansen <shanemhansen@gmail.com>
Shang Jian Ding <sding3@ncsu.edu>
Shaozhen Ding <dsz0111@gmail.com>
+Shapor Naghibzadeh <shapor@gmail.com>
Shaquille Que <shaquille@golang.org>
Shaquille Wyan Que <shaqqywyan@gmail.com>
Shaun Dunning <shaun.dunning@uservoice.com>
@@ -2441,6 +2526,7 @@ Shengyu Zhang <shengyu.zhang@chaitin.com>
Shi Han Ng <shihanng@gmail.com>
ShihCheng Tu <mrtoastcheng@gmail.com>
Shijie Hao <haormj@gmail.com>
+Shiming Zhang <wzshiming@foxmail.com>
Shin Fan <shinfan@google.com>
Shinji Tanaka <shinji.tanaka@gmail.com>
Shinnosuke Sawada <6warashi9@gmail.com>
@@ -2455,6 +2541,7 @@ Shuai Tan <hopehook.com@gmail.com> <hopehook@qq.com>
Shubham Sharma <shubham.sha12@gmail.com>
Shuhei Takahashi <nya@chromium.org>
Shun Fan <sfan@google.com>
+Silke Hofstra <silke@slxh.eu>
Silvan Jegen <s.jegen@gmail.com>
Simão Gomes Viana <simaogmv@gmail.com>
Simarpreet Singh <simar@linux.com>
@@ -2470,6 +2557,7 @@ Simon Thulbourn <simon+github@thulbourn.com>
Simon Whitehead <chemnova@gmail.com>
Sina Siadat <siadat@gmail.com>
Sjoerd Siebinga <sjoerd.siebinga@gmail.com>
+Sofía Celi <cherenkovd69@gmail.com>
Sokolov Yura <funny.falcon@gmail.com>
Song Gao <song@gao.io>
Song Lim <songlim327@gmail.com>
@@ -2498,6 +2586,7 @@ Stephan Klatt <stephan.klatt@gmail.com>
Stephan Renatus <srenatus@chef.io>
Stephan Zuercher <zuercher@gmail.com>
Stéphane Travostino <stephane.travostino@gmail.com>
+Stephen Eckels <stevemk14ebr@gmail.com>
Stephen Lewis <stephen@sock.org.uk>
Stephen Lu <steuhs@users.noreply.github.com>
Stephen Ma <stephenm@golang.org>
@@ -2517,6 +2606,7 @@ Steven Buss <sbuss@google.com>
Steven Elliot Harris <seharris@gmail.com>
Steven Erenst <stevenerenst@gmail.com>
Steven Hartland <steven.hartland@multiplay.co.uk>
+Steven Johnstone <steven.james.johnstone@gmail.com>
Steven Littiebrant <imgroxx@gmail.com>
Steven Maude <git@stevenmaude.co.uk>
Steven Wilkin <stevenwilkin@gmail.com>
@@ -2561,6 +2651,7 @@ Tao Wang <twang2218@gmail.com>
Tarmigan Casebolt <tarmigan@gmail.com>
Taro Aoki <aizu.s1230022@gmail.com>
Taru Karttunen <taruti@taruti.net>
+Tatiana Bradley <tatiana@golang.org>
Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com>
Tatsuya Kaneko <m.ddotx.f@gmail.com>
Taufiq Rahman <taufiqrx8@gmail.com>
@@ -2686,6 +2777,7 @@ Uriel Mangado <uriel@berlinblue.org>
Urvil Patel <patelurvil38@gmail.com>
Utkarsh Dixit <53217283+utkarsh-extc@users.noreply.github.com>
Uttam C Pawar <uttam.c.pawar@intel.com>
+Uzondu Enudeme <uzondu@orijtech.com>
Vadim Grek <vadimprog@gmail.com>
Vadim Vygonets <unixdj@gmail.com>
Val Polouchkine <vpolouch@justin.tv>
@@ -2723,7 +2815,7 @@ Vladimir Mihailenco <vladimir.webdev@gmail.com>
Vladimir Nikishenko <vova616@gmail.com>
Vladimir Stefanovic <vladimir.stefanovic@imgtec.com>
Vladimir Varankin <nek.narqo@gmail.com>
-Vojtech Bocek <vbocek@gmail.com>
+Vojtěch Boček <vojtech.bocek@avast.com> <vbocek@gmail.com>
Volker Dobler <dr.volker.dobler@gmail.com>
Volodymyr Paprotski <vpaprots@ca.ibm.com>
Vyacheslav Pachkov <slava.pach@gmail.com>
@@ -2733,8 +2825,10 @@ Wagner Riffel <wgrriffel@gmail.com>
Walt Della <walt@javins.net>
Walter Poupore <wpoupore@google.com>
Wander Lairson Costa <wcosta@mozilla.com>
+Wang Deyu <wangdeyu.2021@bytedance.com>
Wang Xuerui <git@xen0n.name>
Warren Fernandes <warren.f.fernandes@gmail.com>
+Watson Ladd <watson@cloudflare.com>
Wayne Ashley Berry <wayneashleyberry@gmail.com>
Wayne Zuo <wdvxdr1123@gmail.com> <wdvxdr@golangcn.org>
Wedson Almeida Filho <wedsonaf@google.com>
@@ -2747,6 +2841,7 @@ Wei Xikai <xykwei@gmail.com>
Weichao Tang <tevic.tt@gmail.com>
Weilu Jia <optix2000@gmail.com>
Weixie Cui <cuiweixie@gmail.com> <523516579@qq.com>
+Weizhi Yan <yanweizhi@bytedance.com>
Wembley G. Leach, Jr <wembley.gl@gmail.com>
Wen Yang <yangwen.yw@gmail.com>
Wenlei (Frank) He <wlhe@google.com>
@@ -2756,6 +2851,7 @@ Wilfried Teiken <wteiken@google.com>
Will Beason <willbeason@gmail.com>
Will Chan <willchan@google.com>
Will Faught <will.faught@gmail.com>
+Will Hawkins <whh8b@obs.cr>
Will Morrow <wmorrow.qdt@qualcommdatacenter.com>
Will Norris <willnorris@google.com>
Will Storey <will@summercat.com>
@@ -2801,6 +2897,7 @@ Yestin Sun <ylh@pdx.edu>
Yesudeep Mangalapilly <yesudeep@google.com>
Yissakhar Z. Beck <yissakhar.beck@gmail.com>
Yo-An Lin <yoanlin93@gmail.com>
+Yogesh Mangaj <yogesh.mangaj@gmail.com>
Yohei Takeda <yo.tak0812@gmail.com>
Yongjian Xu <i3dmaster@gmail.com>
Yorman Arias <cixtords@gmail.com>
@@ -2829,6 +2926,7 @@ Yuval Pavel Zholkover <paulzhol@gmail.com>
Yves Junqueira <yvesj@google.com> <yves.junqueira@gmail.com>
Zac Bergquist <zbergquist99@gmail.com>
Zach Bintliff <zbintliff@gmail.com>
+Zach Collier <zamicol@gmail.com>
Zach Gershman <zachgersh@gmail.com>
Zach Hoffman <zrhoffman@apache.org>
Zach Jones <zachj1@gmail.com>
@@ -2838,6 +2936,7 @@ Zachary Gershman <zgershman@pivotal.io>
Zaiyang Li <zaiyangli777@gmail.com>
Zak <zrjknill@gmail.com>
Zakatell Kanda <hi@zkanda.io>
+Zeke Lu <lvzecai@gmail.com>
Zellyn Hunter <zellyn@squareup.com> <zellyn@gmail.com>
Zev Goldstein <zev.goldstein@gmail.com>
Zhang Boyang <zhangboyang.id@gmail.com>
@@ -2858,6 +2957,7 @@ Zvonimir Pavlinovic <zpavlinovic@google.com>
Zyad A. Ali <zyad.ali.me@gmail.com>
Максадбек Ахмедов <a.maksadbek@gmail.com>
Максим Федосеев <max.faceless.frei@gmail.com>
+Михаил Патин <mixa1243@gmail.com>
Роман Хавроненко <hagen1778@gmail.com>
Тарас Буник <tbunyk@gmail.com>
Фахриддин Балтаев <faxriddinjon@gmail.com>
diff --git a/doc/go1.19.html b/doc/go1.19.html
index 53c11bd26e..41ffe8ebfd 100644
--- a/doc/go1.19.html
+++ b/doc/go1.19.html
@@ -118,6 +118,15 @@ as well as support for rendering them to HTML, Markdown, and text.
and <code>GOGCCFLAGS</code> variables it reports.
</p>
+<p><!-- https://go.dev/issue/29666 -->
+ <code>go</code> <code>list</code> <code>-json</code> now accepts a
+ comma-separated list of JSON fields to populate. If a list is specified,
+ the JSON output will include only those fields, and
+ <code>go</code> <code>list</code> may avoid work to compute fields that are
+ not included. In some cases, this may suppress errors that would otherwise
+ be reported.
+</p>
+
<p><!-- CL 410821 -->
The <code>go</code> command now caches information necessary to load some modules,
which should result in a speed-up of some <code>go</code> <code>list</code> invocations.
diff --git a/misc/cgo/test/setgid2_linux.go b/misc/cgo/test/setgid2_linux.go
index d239893f43..9069cff334 100644
--- a/misc/cgo/test/setgid2_linux.go
+++ b/misc/cgo/test/setgid2_linux.go
@@ -20,7 +20,10 @@ import (
)
func testSetgidStress(t *testing.T) {
- const N = 1000
+ var N = 1000
+ if testing.Short() {
+ N = 50
+ }
ch := make(chan int, N)
for i := 0; i < N; i++ {
go func() {
diff --git a/misc/cgo/testcarchive/carchive_test.go b/misc/cgo/testcarchive/carchive_test.go
index d36b97b70e..c409c317dc 100644
--- a/misc/cgo/testcarchive/carchive_test.go
+++ b/misc/cgo/testcarchive/carchive_test.go
@@ -205,6 +205,7 @@ func genHeader(t *testing.T, header, dir string) {
func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) {
t.Helper()
cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
+ cmd.Env = append(cmd.Environ(), "GO111MODULE=off") // 'go install' only works in GOPATH mode
t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
@@ -238,7 +239,7 @@ func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) {
binArgs := append(cmdToRun(exe), "arg1", "arg2")
cmd = exec.Command(binArgs[0], binArgs[1:]...)
if runtime.Compiler == "gccgo" {
- cmd.Env = append(os.Environ(), "GCCGO=1")
+ cmd.Env = append(cmd.Environ(), "GCCGO=1")
}
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
@@ -822,9 +823,15 @@ func TestPIE(t *testing.T) {
t.Skipf("skipping PIE test on %s", GOOS)
}
+ libgoa := "libgo.a"
+ if runtime.Compiler == "gccgo" {
+ libgoa = "liblibgo.a"
+ }
+
if !testWork {
defer func() {
os.Remove("testp" + exeSuffix)
+ os.Remove(libgoa)
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
}
@@ -837,18 +844,13 @@ func TestPIE(t *testing.T) {
// be running this test in a GOROOT owned by root.)
genHeader(t, "p.h", "./p")
- cmd := exec.Command("go", "install", "-buildmode=c-archive", "./libgo")
+ cmd := exec.Command("go", "build", "-buildmode=c-archive", "./libgo")
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
}
- libgoa := "libgo.a"
- if runtime.Compiler == "gccgo" {
- libgoa = "liblibgo.a"
- }
-
- ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", filepath.Join(libgodir, libgoa))
+ ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", libgoa)
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
@@ -1035,6 +1037,7 @@ func TestCachedInstall(t *testing.T) {
buildcmd := []string{"go", "install", "-buildmode=c-archive", "./libgo"}
cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
+ cmd.Env = append(cmd.Environ(), "GO111MODULE=off") // 'go install' only works in GOPATH mode
t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
@@ -1050,6 +1053,7 @@ func TestCachedInstall(t *testing.T) {
}
cmd = exec.Command(buildcmd[0], buildcmd[1:]...)
+ cmd.Env = append(cmd.Environ(), "GO111MODULE=off")
t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
diff --git a/misc/cgo/testcshared/cshared_test.go b/misc/cgo/testcshared/cshared_test.go
index e4898778be..7fbcff24dd 100644
--- a/misc/cgo/testcshared/cshared_test.go
+++ b/misc/cgo/testcshared/cshared_test.go
@@ -151,16 +151,22 @@ func testMain(m *testing.M) int {
// The installation directory format varies depending on the platform.
output, err := exec.Command("go", "list",
"-buildmode=c-shared",
- "-installsuffix", "testcshared",
"-f", "{{.Target}}",
- "./libgo").CombinedOutput()
+ "runtime/cgo").CombinedOutput()
if err != nil {
log.Panicf("go list failed: %v\n%s", err, output)
}
- target := string(bytes.TrimSpace(output))
- libgoname = filepath.Base(target)
- installdir = filepath.Dir(target)
- libSuffix = strings.TrimPrefix(filepath.Ext(target), ".")
+ runtimeCgoTarget := string(bytes.TrimSpace(output))
+ libSuffix = strings.TrimPrefix(filepath.Ext(runtimeCgoTarget), ".")
+
+ defer func() {
+ if installdir != "" {
+ err := os.RemoveAll(installdir)
+ if err != nil {
+ log.Panic(err)
+ }
+ }
+ }()
return m.Run()
}
@@ -284,8 +290,13 @@ func createHeaders() error {
}
// Generate a C header file for libgo itself.
- args = []string{"go", "install", "-buildmode=c-shared",
- "-installsuffix", "testcshared", "./libgo"}
+ installdir, err = os.MkdirTemp("", "testcshared")
+ if err != nil {
+ return err
+ }
+ libgoname = "libgo." + libSuffix
+
+ args = []string{"go", "build", "-buildmode=c-shared", "-o", filepath.Join(installdir, libgoname), "./libgo"}
cmd = exec.Command(args[0], args[1:]...)
out, err = cmd.CombinedOutput()
if err != nil {
@@ -373,6 +384,7 @@ func createHeadersOnce(t *testing.T) {
headersErr = createHeaders()
})
if headersErr != nil {
+ t.Helper()
t.Fatal(headersErr)
}
}
@@ -705,12 +717,15 @@ func TestCachedInstall(t *testing.T) {
copyFile(t, filepath.Join(tmpdir, "src", "testcshared", "libgo", "libgo.go"), filepath.Join("libgo", "libgo.go"))
copyFile(t, filepath.Join(tmpdir, "src", "testcshared", "p", "p.go"), filepath.Join("p", "p.go"))
- env := append(os.Environ(), "GOPATH="+tmpdir, "GOBIN="+filepath.Join(tmpdir, "bin"))
-
buildcmd := []string{"go", "install", "-x", "-buildmode=c-shared", "-installsuffix", "testcshared", "./libgo"}
cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
cmd.Dir = filepath.Join(tmpdir, "src", "testcshared")
+ env := append(cmd.Environ(),
+ "GOPATH="+tmpdir,
+ "GOBIN="+filepath.Join(tmpdir, "bin"),
+ "GO111MODULE=off", // 'go install' only works in GOPATH mode
+ )
cmd.Env = env
t.Log(buildcmd)
out, err := cmd.CombinedOutput()
diff --git a/misc/cgo/testshared/shared_test.go b/misc/cgo/testshared/shared_test.go
index 024f084da5..756c4baa6b 100644
--- a/misc/cgo/testshared/shared_test.go
+++ b/misc/cgo/testshared/shared_test.go
@@ -108,6 +108,15 @@ func testMain(m *testing.M) (int, error) {
defer os.RemoveAll(workDir)
}
+ // -buildmode=shared fundamentally does not work in module mode.
+ // (It tries to share package dependencies across builds, but in module mode
+ // each module has its own distinct set of dependency versions.)
+ // We would like to eliminate it (see https://go.dev/issue/47788),
+ // but first need to figure out a replacement that covers the small subset
+ // of use-cases where -buildmode=shared still works today.
+ // For now, run the tests in GOPATH mode only.
+ os.Setenv("GO111MODULE", "off")
+
// Some tests need to edit the source in GOPATH, so copy this directory to a
// temporary directory and chdir to that.
gopath := filepath.Join(workDir, "gopath")
diff --git a/src/cmd/compile/doc.go b/src/cmd/compile/doc.go
index b8862f62cf..60e12630c5 100644
--- a/src/cmd/compile/doc.go
+++ b/src/cmd/compile/doc.go
@@ -68,9 +68,6 @@ Flags:
-importcfg file
Read import configuration from file.
In the file, set importmap, packagefile to specify import resolution.
- -importmap old=new
- Interpret import "old" as import "new" during compilation.
- The option may be repeated to add multiple mappings.
-installsuffix suffix
Look for packages in $GOROOT/pkg/$GOOS_$GOARCH_suffix
instead of $GOROOT/pkg/$GOOS_$GOARCH.
diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go
index 5e1493e275..39ce8e66f7 100644
--- a/src/cmd/compile/internal/base/base.go
+++ b/src/cmd/compile/internal/base/base.go
@@ -70,7 +70,6 @@ var NoInstrumentPkgs = []string{
"runtime/msan",
"runtime/asan",
"internal/cpu",
- "buildcfg",
}
// Don't insert racefuncenter/racefuncexit into the following packages.
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
index df828940ac..f2728d972f 100644
--- a/src/cmd/compile/internal/base/flag.go
+++ b/src/cmd/compile/internal/base/flag.go
@@ -100,7 +100,6 @@ type CmdFlags struct {
GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
GoVersion string "help:\"required version of the runtime\""
ImportCfg func(string) "help:\"read import configuration from `file`\""
- ImportMap func(string) "help:\"add `definition` of the form source=actual to import map\""
InstallSuffix string "help:\"set pkg directory `suffix`\""
JSON string "help:\"version,file for JSON compiler/optimizer detail output\""
Lang string "help:\"Go language version source code expects\""
@@ -130,7 +129,7 @@ type CmdFlags struct {
Files map[string]string
}
ImportDirs []string // appended to by -I
- ImportMap map[string]string // set by -importmap OR -importcfg
+ ImportMap map[string]string // set by -importcfg
PackageFile map[string]string // set by -importcfg; nil means not in use
SpectreIndex bool // set by -spectre=index or -spectre=all
// Whether we are adding any sort of code instrumentation, such as
@@ -156,7 +155,6 @@ func ParseFlags() {
Flag.EmbedCfg = readEmbedCfg
Flag.GenDwarfInl = 2
Flag.ImportCfg = readImportCfg
- Flag.ImportMap = addImportMap
Flag.LinkShared = &Ctxt.Flag_linkshared
Flag.Shared = &Ctxt.Flag_shared
Flag.WB = true
@@ -389,21 +387,6 @@ func addImportDir(dir string) {
}
}
-func addImportMap(s string) {
- if Flag.Cfg.ImportMap == nil {
- Flag.Cfg.ImportMap = make(map[string]string)
- }
- if strings.Count(s, "=") != 1 {
- log.Fatal("-importmap argument must be of the form source=actual")
- }
- i := strings.Index(s, "=")
- source, actual := s[:i], s[i+1:]
- if source == "" || actual == "" {
- log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
- }
- Flag.Cfg.ImportMap[source] = actual
-}
-
func readImportCfg(file string) {
if Flag.Cfg.ImportMap == nil {
Flag.Cfg.ImportMap = make(map[string]string)
diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go
index 98c0ffa5b0..48f5eccf34 100644
--- a/src/cmd/compile/internal/ir/mini.go
+++ b/src/cmd/compile/internal/ir/mini.go
@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
//go:generate go run -mod=mod mknode.go
+// Note: see comment at top of mknode.go
package ir
diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go
index 5a0aaadf16..af8869d122 100644
--- a/src/cmd/compile/internal/ir/mknode.go
+++ b/src/cmd/compile/internal/ir/mknode.go
@@ -5,6 +5,12 @@
//go:build ignore
// +build ignore
+// Note: this program must be run with the GOROOT
+// environment variable set to the root of this tree.
+// GOROOT=...
+// cd $GOROOT/src/cmd/compile/internal/ir
+// ../../../../../bin/go run -mod=mod mknode.go
+
package main
import (
@@ -154,6 +160,9 @@ func forNodeFields(named *types.Named, prologue, singleTmpl, sliceTmpl, epilogue
}
tmpl, what := singleTmpl, types.TypeString(typ, types.RelativeTo(irPkg))
+ if what == "go/constant.Value" {
+ return false
+ }
if implementsNode(typ) {
if slice != nil {
helper := strings.TrimPrefix(what, "*") + "s"
diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go
index cf2f0b38db..1ba561b8b9 100644
--- a/src/cmd/compile/internal/noder/stencil.go
+++ b/src/cmd/compile/internal/noder/stencil.go
@@ -1214,6 +1214,9 @@ func (subst *subster) node(n ir.Node) ir.Node {
if m.Tag != nil && m.Tag.Op() == ir.OTYPESW {
break // Nothing to do here for type switches.
}
+ if m.Tag != nil && !types.IsComparable(m.Tag.Type()) {
+ break // Nothing to do here for un-comparable types.
+ }
if m.Tag != nil && !m.Tag.Type().IsEmptyInterface() && m.Tag.Type().HasShape() {
// To implement a switch on a value that is or has a type parameter, we first convert
// that thing we're switching on to an interface{}.
@@ -1354,7 +1357,7 @@ func (g *genInst) dictPass(info *instInfo) {
}
case ir.ODOTTYPE, ir.ODOTTYPE2:
dt := m.(*ir.TypeAssertExpr)
- if !dt.Type().HasShape() && !dt.X.Type().HasShape() {
+ if !dt.Type().HasShape() && !(dt.X.Type().HasShape() && !dt.X.Type().IsEmptyInterface()) {
break
}
var rtype, itab ir.Node
@@ -1654,12 +1657,14 @@ func (g *genInst) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool
se := call.X.(*ir.SelectorExpr)
if se.X.Type().IsShape() {
// This is a method call enabled by a type bound.
-
- // We need this extra check for method expressions,
- // which don't add in the implicit XDOTs.
- tmpse := ir.NewSelectorExpr(src.NoXPos, ir.OXDOT, se.X, se.Sel)
- tmpse = typecheck.AddImplicitDots(tmpse)
- tparam := tmpse.X.Type()
+ tparam := se.X.Type()
+ if call.X.Op() == ir.ODOTMETH {
+ // We need this extra check for method expressions,
+ // which don't add in the implicit XDOTs.
+ tmpse := ir.NewSelectorExpr(src.NoXPos, ir.OXDOT, se.X, se.Sel)
+ tmpse = typecheck.AddImplicitDots(tmpse)
+ tparam = tmpse.X.Type()
+ }
if !tparam.IsShape() {
// The method expression is not
// really on a typeparam.
diff --git a/src/cmd/compile/internal/ssa/loopbce.go b/src/cmd/compile/internal/ssa/loopbce.go
index a934cd2c7b..22fb5118ce 100644
--- a/src/cmd/compile/internal/ssa/loopbce.go
+++ b/src/cmd/compile/internal/ssa/loopbce.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/base"
"fmt"
"math"
)
@@ -90,41 +91,42 @@ func findIndVar(f *Func) []indVar {
continue
}
- var flags indVarFlags
- var ind, max *Value // induction, and maximum
+ var ind *Value // induction variable
+ var init *Value // starting value
+ var limit *Value // ending value
- // Check thet the control if it either ind </<= max or max >/>= ind.
+ // Check thet the control if it either ind </<= limit or limit </<= ind.
// TODO: Handle 32-bit comparisons.
// TODO: Handle unsigned comparisons?
c := b.Controls[0]
+ inclusive := false
switch c.Op {
case OpLeq64:
- flags |= indVarMaxInc
+ inclusive = true
fallthrough
case OpLess64:
- ind, max = c.Args[0], c.Args[1]
+ ind, limit = c.Args[0], c.Args[1]
default:
continue
}
// See if this is really an induction variable
less := true
- min, inc, nxt := parseIndVar(ind)
- if min == nil {
+ init, inc, nxt := parseIndVar(ind)
+ if init == nil {
// We failed to parse the induction variable. Before punting, we want to check
- // whether the control op was written with arguments in non-idiomatic order,
- // so that we believe being "max" (the upper bound) is actually the induction
- // variable itself. This would happen for code like:
- // for i := 0; len(n) > i; i++
- min, inc, nxt = parseIndVar(max)
- if min == nil {
+ // whether the control op was written with the induction variable on the RHS
+ // instead of the LHS. This happens for the downwards case, like:
+ // for i := len(n)-1; i >= 0; i--
+ init, inc, nxt = parseIndVar(limit)
+ if init == nil {
// No recognied induction variable on either operand
continue
}
// Ok, the arguments were reversed. Swap them, and remember that we're
// looking at a ind >/>= loop (so the induction must be decrementing).
- ind, max = max, ind
+ ind, limit = limit, ind
less = false
}
@@ -138,8 +140,8 @@ func findIndVar(f *Func) []indVar {
}
// Increment sign must match comparison direction.
- // When incrementing, the termination comparison must be ind </<= max.
- // When decrementing, the termination comparison must be ind >/>= max.
+ // When incrementing, the termination comparison must be ind </<= limit.
+ // When decrementing, the termination comparison must be ind >/>= limit.
// See issue 26116.
if step > 0 && !less {
continue
@@ -148,177 +150,229 @@ func findIndVar(f *Func) []indVar {
continue
}
- // If the increment is negative, swap min/max and their flags
- if step < 0 {
- min, max = max, min
- oldf := flags
- flags = indVarMaxInc
- if oldf&indVarMaxInc == 0 {
- flags |= indVarMinExc
- }
- step = -step
- }
-
- if flags&indVarMaxInc != 0 && max.Op == OpConst64 && max.AuxInt+step < max.AuxInt {
- // For a <= comparison, we need to make sure that a value equal to
- // max can be incremented without overflowing.
- // (For a < comparison, the %step check below ensures no overflow.)
- continue
- }
-
// Up to now we extracted the induction variable (ind),
// the increment delta (inc), the temporary sum (nxt),
- // the minimum value (min) and the maximum value (max).
+ // the initial value (init) and the limiting value (limit).
//
- // We also know that ind has the form (Phi min nxt) where
+ // We also know that ind has the form (Phi init nxt) where
// nxt is (Add inc nxt) which means: 1) inc dominates nxt
// and 2) there is a loop starting at inc and containing nxt.
//
// We need to prove that the induction variable is incremented
- // only when it's smaller than the maximum value.
+ // only when it's smaller than the limiting value.
// Two conditions must happen listed below to accept ind
// as an induction variable.
// First condition: loop entry has a single predecessor, which
// is the header block. This implies that b.Succs[0] is
- // reached iff ind < max.
+ // reached iff ind < limit.
if len(b.Succs[0].b.Preds) != 1 {
// b.Succs[1] must exit the loop.
continue
}
// Second condition: b.Succs[0] dominates nxt so that
- // nxt is computed when inc < max, meaning nxt <= max.
+ // nxt is computed when inc < limit.
if !sdom.IsAncestorEq(b.Succs[0].b, nxt.Block) {
// inc+ind can only be reached through the branch that enters the loop.
continue
}
- // We can only guarantee that the loop runs within limits of induction variable
- // if (one of)
- // (1) the increment is ±1
- // (2) the limits are constants
- // (3) loop is of the form k0 upto Known_not_negative-k inclusive, step <= k
- // (4) loop is of the form k0 upto Known_not_negative-k exclusive, step <= k+1
- // (5) loop is of the form Known_not_negative downto k0, minint+step < k0
- if step > 1 {
- ok := false
- if min.Op == OpConst64 && max.Op == OpConst64 {
- if max.AuxInt > min.AuxInt && max.AuxInt%step == min.AuxInt%step { // handle overflow
- ok = true
- }
- }
- // Handle induction variables of these forms.
- // KNN is known-not-negative.
- // SIGNED ARITHMETIC ONLY. (see switch on c above)
- // Possibilities for KNN are len and cap; perhaps we can infer others.
- // for i := 0; i <= KNN-k ; i += k
- // for i := 0; i < KNN-(k-1); i += k
- // Also handle decreasing.
-
- // "Proof" copied from https://go-review.googlesource.com/c/go/+/104041/10/src/cmd/compile/internal/ssa/loopbce.go#164
- //
- // In the case of
- // // PC is Positive Constant
- // L := len(A)-PC
- // for i := 0; i < L; i = i+PC
- //
- // we know:
- //
- // 0 + PC does not over/underflow.
- // len(A)-PC does not over/underflow
- // maximum value for L is MaxInt-PC
- // i < L <= MaxInt-PC means i + PC < MaxInt hence no overflow.
-
- // To match in SSA:
- // if (a) min.Op == OpConst64(k0)
- // and (b) k0 >= MININT + step
- // and (c) max.Op == OpSubtract(Op{StringLen,SliceLen,SliceCap}, k)
- // or (c) max.Op == OpAdd(Op{StringLen,SliceLen,SliceCap}, -k)
- // or (c) max.Op == Op{StringLen,SliceLen,SliceCap}
- // and (d) if upto loop, require indVarMaxInc && step <= k or !indVarMaxInc && step-1 <= k
-
- if min.Op == OpConst64 && min.AuxInt >= step+math.MinInt64 {
- knn := max
- k := int64(0)
- var kArg *Value
-
- switch max.Op {
- case OpSub64:
- knn = max.Args[0]
- kArg = max.Args[1]
-
- case OpAdd64:
- knn = max.Args[0]
- kArg = max.Args[1]
- if knn.Op == OpConst64 {
- knn, kArg = kArg, knn
+ // Check for overflow/underflow. We need to make sure that inc never causes
+ // the induction variable to wrap around.
+ // We use a function wrapper here for easy return true / return false / keep going logic.
+ // This function returns true if the increment will never overflow/underflow.
+ ok := func() bool {
+ if step > 0 {
+ if limit.Op == OpConst64 {
+ // Figure out the actual largest value.
+ v := limit.AuxInt
+ if !inclusive {
+ if v == math.MinInt64 {
+ return false // < minint is never satisfiable.
+ }
+ v--
+ }
+ if init.Op == OpConst64 {
+ // Use stride to compute a better lower limit.
+ if init.AuxInt > v {
+ return false
+ }
+ v = addU(init.AuxInt, diff(v, init.AuxInt)/uint64(step)*uint64(step))
}
+ // It is ok if we can't overflow when incrementing from the largest value.
+ return !addWillOverflow(v, step)
}
- switch knn.Op {
- case OpSliceLen, OpStringLen, OpSliceCap:
- default:
- knn = nil
+ if step == 1 && !inclusive {
+ // Can't overflow because maxint is never a possible value.
+ return true
}
-
- if kArg != nil && kArg.Op == OpConst64 {
- k = kArg.AuxInt
- if max.Op == OpAdd64 {
- k = -k
- }
+ // If the limit is not a constant, check to see if it is a
+ // negative offset from a known non-negative value.
+ knn, k := findKNN(limit)
+ if knn == nil || k < 0 {
+ return false
+ }
+ // limit == (something nonnegative) - k. That subtraction can't underflow, so
+ // we can trust it.
+ if inclusive {
+ // ind <= knn - k cannot overflow if step is at most k
+ return step <= k
}
- if k >= 0 && knn != nil {
- if inc.AuxInt > 0 { // increasing iteration
- // The concern for the relation between step and k is to ensure that iv never exceeds knn
- // i.e., iv < knn-(K-1) ==> iv + K <= knn; iv <= knn-K ==> iv +K < knn
- if step <= k || flags&indVarMaxInc == 0 && step-1 == k {
- ok = true
+ // ind < knn - k cannot overflow if step is at most k+1
+ return step <= k+1 && k != math.MaxInt64
+ } else { // step < 0
+ if limit.Op == OpConst64 {
+ // Figure out the actual smallest value.
+ v := limit.AuxInt
+ if !inclusive {
+ if v == math.MaxInt64 {
+ return false // > maxint is never satisfiable.
}
- } else { // decreasing iteration
- // Will be decrementing from max towards min; max is knn-k; will only attempt decrement if
- // knn-k >[=] min; underflow is only a concern if min-step is not smaller than min.
- // This all assumes signed integer arithmetic
- // This is already assured by the test above: min.AuxInt >= step+math.MinInt64
- ok = true
+ v++
}
+ if init.Op == OpConst64 {
+ // Use stride to compute a better lower limit.
+ if init.AuxInt < v {
+ return false
+ }
+ v = subU(init.AuxInt, diff(init.AuxInt, v)/uint64(-step)*uint64(-step))
+ }
+ // It is ok if we can't underflow when decrementing from the smallest value.
+ return !subWillUnderflow(v, -step)
+ }
+ if step == -1 && !inclusive {
+ // Can't underflow because minint is never a possible value.
+ return true
}
}
+ return false
- // TODO: other unrolling idioms
- // for i := 0; i < KNN - KNN % k ; i += k
- // for i := 0; i < KNN&^(k-1) ; i += k // k a power of 2
- // for i := 0; i < KNN&(-k) ; i += k // k a power of 2
+ }
- if !ok {
- continue
+ if ok() {
+ flags := indVarFlags(0)
+ var min, max *Value
+ if step > 0 {
+ min = init
+ max = limit
+ if inclusive {
+ flags |= indVarMaxInc
+ }
+ } else {
+ min = limit
+ max = init
+ flags |= indVarMaxInc
+ if !inclusive {
+ flags |= indVarMinExc
+ }
+ step = -step
+ }
+ if f.pass.debug >= 1 {
+ printIndVar(b, ind, min, max, step, flags)
}
- }
- if f.pass.debug >= 1 {
- printIndVar(b, ind, min, max, step, flags)
+ iv = append(iv, indVar{
+ ind: ind,
+ min: min,
+ max: max,
+ entry: b.Succs[0].b,
+ flags: flags,
+ })
+ b.Logf("found induction variable %v (inc = %v, min = %v, max = %v)\n", ind, inc, min, max)
}
- iv = append(iv, indVar{
- ind: ind,
- min: min,
- max: max,
- entry: b.Succs[0].b,
- flags: flags,
- })
- b.Logf("found induction variable %v (inc = %v, min = %v, max = %v)\n", ind, inc, min, max)
+ // TODO: other unrolling idioms
+ // for i := 0; i < KNN - KNN % k ; i += k
+ // for i := 0; i < KNN&^(k-1) ; i += k // k a power of 2
+ // for i := 0; i < KNN&(-k) ; i += k // k a power of 2
}
return iv
}
-func dropAdd64(v *Value) (*Value, int64) {
- if v.Op == OpAdd64 && v.Args[0].Op == OpConst64 {
- return v.Args[1], v.Args[0].AuxInt
+// addWillOverflow reports whether x+y would result in a value more than maxint.
+func addWillOverflow(x, y int64) bool {
+ return x+y < x
+}
+
+// subWillUnderflow reports whether x-y would result in a value less than minint.
+func subWillUnderflow(x, y int64) bool {
+ return x-y > x
+}
+
+// diff returns x-y as a uint64. Requires x>=y.
+func diff(x, y int64) uint64 {
+ if x < y {
+ base.Fatalf("diff %d - %d underflowed", x, y)
+ }
+ return uint64(x - y)
+}
+
+// addU returns x+y. Requires that x+y does not overflow an int64.
+func addU(x int64, y uint64) int64 {
+ if y >= 1<<63 {
+ if x >= 0 {
+ base.Fatalf("addU overflowed %d + %d", x, y)
+ }
+ x += 1<<63 - 1
+ x += 1
+ y -= 1 << 63
+ }
+ if addWillOverflow(x, int64(y)) {
+ base.Fatalf("addU overflowed %d + %d", x, y)
+ }
+ return x + int64(y)
+}
+
+// subU returns x-y. Requires that x-y does not underflow an int64.
+func subU(x int64, y uint64) int64 {
+ if y >= 1<<63 {
+ if x < 0 {
+ base.Fatalf("subU underflowed %d - %d", x, y)
+ }
+ x -= 1<<63 - 1
+ x -= 1
+ y -= 1 << 63
+ }
+ if subWillUnderflow(x, int64(y)) {
+ base.Fatalf("subU underflowed %d - %d", x, y)
+ }
+ return x - int64(y)
+}
+
+// if v is known to be x - c, where x is known to be nonnegative and c is a
+// constant, return x, c. Otherwise return nil, 0.
+func findKNN(v *Value) (*Value, int64) {
+ var x, y *Value
+ x = v
+ switch v.Op {
+ case OpSub64:
+ x = v.Args[0]
+ y = v.Args[1]
+
+ case OpAdd64:
+ x = v.Args[0]
+ y = v.Args[1]
+ if x.Op == OpConst64 {
+ x, y = y, x
+ }
+ }
+ switch x.Op {
+ case OpSliceLen, OpStringLen, OpSliceCap:
+ default:
+ return nil, 0
+ }
+ if y == nil {
+ return x, 0
+ }
+ if y.Op != OpConst64 {
+ return nil, 0
}
- if v.Op == OpAdd64 && v.Args[1].Op == OpConst64 {
- return v.Args[0], v.Args[1].AuxInt
+ if v.Op == OpAdd64 {
+ return x, -y.AuxInt
}
- return v, 0
+ return x, y.AuxInt
}
func printIndVar(b *Block, i, min, max *Value, inc int64, flags indVarFlags) {
diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go
index 68b9ac3ff3..d034808132 100644
--- a/src/cmd/compile/internal/types/size.go
+++ b/src/cmd/compile/internal/types/size.go
@@ -169,6 +169,8 @@ func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
}
// Special case: sync/atomic.align64 is an empty struct we recognize
// as a signal that the struct it contains must be 64-bit-aligned.
+ //
+ // This logic is duplicated in go/types and cmd/compile/internal/types2.
if isStruct && t.NumFields() == 0 && t.Sym() != nil && t.Sym().Name == "align64" && isAtomicStdPkg(t.Sym().Pkg) {
maxalign = 8
}
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index a22ea5d12f..94c290b9ee 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -418,7 +418,8 @@ func (conf *Config) Check(path string, files []*syntax.File, info *Info) (*Packa
// AssertableTo reports whether a value of type V can be asserted to have type T.
//
-// The behavior of AssertableTo is undefined in two cases:
+// The behavior of AssertableTo is unspecified in three cases:
+// - if T is Typ[Invalid]
// - if V is a generalized interface; i.e., an interface that may only be used
// as a type constraint in Go code
// - if T is an uninstantiated generic type
@@ -434,8 +435,8 @@ func AssertableTo(V *Interface, T Type) bool {
// AssignableTo reports whether a value of type V is assignable to a variable
// of type T.
//
-// The behavior of AssignableTo is undefined if V or T is an uninstantiated
-// generic type.
+// The behavior of AssignableTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
func AssignableTo(V, T Type) bool {
x := operand{mode: value, typ: V}
ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x
@@ -445,8 +446,8 @@ func AssignableTo(V, T Type) bool {
// ConvertibleTo reports whether a value of type V is convertible to a value of
// type T.
//
-// The behavior of ConvertibleTo is undefined if V or T is an uninstantiated
-// generic type.
+// The behavior of ConvertibleTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
func ConvertibleTo(V, T Type) bool {
x := operand{mode: value, typ: V}
return x.convertibleTo(nil, T, nil) // check not needed for non-constant x
@@ -454,8 +455,8 @@ func ConvertibleTo(V, T Type) bool {
// Implements reports whether type V implements interface T.
//
-// The behavior of Implements is undefined if V is an uninstantiated generic
-// type.
+// The behavior of Implements is unspecified if V is Typ[Invalid] or an uninstantiated
+// generic type.
func Implements(V Type, T *Interface) bool {
if T.Empty() {
// All types (even Typ[Invalid]) implement the empty interface.
diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go
index 6133e15924..4da309461f 100644
--- a/src/cmd/compile/internal/types2/sizes.go
+++ b/src/cmd/compile/internal/types2/sizes.go
@@ -53,6 +53,17 @@ func (s *StdSizes) Alignof(T Type) int64 {
// is the same as unsafe.Alignof(x[0]), but at least 1."
return s.Alignof(t.elem)
case *Struct:
+ if len(t.fields) == 0 && isSyncAtomicAlign64(T) {
+ // Special case: sync/atomic.align64 is an
+ // empty struct we recognize as a signal that
+ // the struct it contains must be
+ // 64-bit-aligned.
+ //
+ // This logic is equivalent to the logic in
+ // cmd/compile/internal/types/size.go:calcStructOffset
+ return 8
+ }
+
// spec: "For a variable x of struct type: unsafe.Alignof(x)
// is the largest of the values unsafe.Alignof(x.f) for each
// field f of x, but at least 1."
@@ -93,6 +104,18 @@ func (s *StdSizes) Alignof(T Type) int64 {
return a
}
+func isSyncAtomicAlign64(T Type) bool {
+ named, ok := T.(*Named)
+ if !ok {
+ return false
+ }
+ obj := named.Obj()
+ return obj.Name() == "align64" &&
+ obj.Pkg() != nil &&
+ (obj.Pkg().Path() == "sync/atomic" ||
+ obj.Pkg().Path() == "runtime/internal/atomic")
+}
+
func (s *StdSizes) Offsetsof(fields []*Var) []int64 {
offsets := make([]int64, len(fields))
var o int64
diff --git a/src/cmd/compile/internal/types2/sizes_test.go b/src/cmd/compile/internal/types2/sizes_test.go
index c9a4942bed..824ec838e2 100644
--- a/src/cmd/compile/internal/types2/sizes_test.go
+++ b/src/cmd/compile/internal/types2/sizes_test.go
@@ -14,12 +14,15 @@ import (
// findStructType typechecks src and returns the first struct type encountered.
func findStructType(t *testing.T, src string) *types2.Struct {
+ return findStructTypeConfig(t, src, &types2.Config{})
+}
+
+func findStructTypeConfig(t *testing.T, src string, conf *types2.Config) *types2.Struct {
f, err := parseSrc("x.go", src)
if err != nil {
t.Fatal(err)
}
info := types2.Info{Types: make(map[syntax.Expr]types2.TypeAndValue)}
- var conf types2.Config
_, err = conf.Check("x", []*syntax.File{f}, &info)
if err != nil {
t.Fatal(err)
@@ -105,3 +108,39 @@ const _ = unsafe.Offsetof(struct{ x int64 }{}.x)
_ = conf.Sizes.Alignof(tv.Type)
}
}
+
+// Issue #53884.
+func TestAtomicAlign(t *testing.T) {
+ const src = `
+package main
+
+import "sync/atomic"
+
+var s struct {
+ x int32
+ y atomic.Int64
+ z int64
+}
+`
+
+ want := []int64{0, 8, 16}
+ for _, arch := range []string{"386", "amd64"} {
+ t.Run(arch, func(t *testing.T) {
+ conf := types2.Config{
+ Importer: defaultImporter(),
+ Sizes: types2.SizesFor("gc", arch),
+ }
+ ts := findStructTypeConfig(t, src, &conf)
+ var fields []*types2.Var
+ // Make a copy manually :(
+ for i := 0; i < ts.NumFields(); i++ {
+ fields = append(fields, ts.Field(i))
+ }
+
+ offsets := conf.Sizes.Offsetsof(fields)
+ if offsets[0] != want[0] || offsets[1] != want[1] || offsets[2] != want[2] {
+ t.Errorf("OffsetsOf(%v) = %v want %v", ts, offsets, want)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go
index b408dd7003..b7d99f96c2 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go
@@ -31,10 +31,8 @@ type x7[A any] struct{ foo7 }
func main7() { var _ foo7 = x7[int]{} }
// crash 8
-// Embedding stand-alone type parameters is not permitted for now. Disabled.
-// type foo8[A any] interface { ~A }
-// func bar8[A foo8[A]](a A) {}
-// func main8() {}
+type foo8[A any] interface { ~A /* ERROR cannot be a type parameter */ }
+func bar8[A foo8[A]](a A) {}
// crash 9
type foo9[A any] interface { foo9 /* ERROR illegal cycle */ [A] }
@@ -74,10 +72,9 @@ func F20[t Z20]() { F20(t /* ERROR invalid composite literal type */ {}) }
type Z21 /* ERROR illegal cycle */ interface{ Z21 }
func F21[T Z21]() { ( /* ERROR not used */ F21[Z21]) }
-// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
-// // crash 24
-// type T24[P any] P
-// func (r T24[P]) m() { T24 /* ERROR without instantiation */ .m() }
+// crash 24
+type T24[P any] P // ERROR cannot use a type parameter as RHS in type declaration
+func (r T24[P]) m() { T24 /* ERROR without instantiation */ .m() }
// crash 25
type T25[A any] int
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index 19376f5bda..91a2f73cc6 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -63,7 +63,7 @@ func order(fn *ir.Func) {
s := fmt.Sprintf("\nbefore order %v", fn.Sym())
ir.DumpList(s, fn.Body)
}
-
+ ir.SetPos(fn) // Set reasonable position for instrumenting code. See issue 53688.
orderBlock(&fn.Body, map[string][]*ir.Name{})
}
@@ -477,6 +477,12 @@ func (o *orderState) edge() {
// and then replaces the old slice in n with the new slice.
// free is a map that can be used to obtain temporary variables by type.
func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
+ if len(*n) != 0 {
+ // Set reasonable position for instrumenting code. See issue 53688.
+ // It would be nice if ir.Nodes had a position (the opening {, probably),
+ // but it doesn't. So we use the first statement's position instead.
+ ir.SetPos((*n)[0])
+ }
var order orderState
order.free = free
mark := order.markTemp()
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index 947da115e3..400c2e85b6 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -204,6 +204,8 @@ func bootstrapBuildTools() {
// https://groups.google.com/d/msg/golang-dev/Ss7mCKsvk8w/Gsq7VYI0AwAJ
// Use the math_big_pure_go build tag to disable the assembly in math/big
// which may contain unsupported instructions.
+ // Use the purego build tag to disable other assembly code,
+ // such as in cmd/internal/notsha256.
// Note that if we are using Go 1.10 or later as bootstrap, the -gcflags=-l
// only applies to the final cmd/go binary, but that's OK: if this is Go 1.10
// or later we don't need to disable inlining to work around bugs in the Go 1.4 compiler.
@@ -211,7 +213,7 @@ func bootstrapBuildTools() {
pathf("%s/bin/go", goroot_bootstrap),
"install",
"-gcflags=-l",
- "-tags=math_big_pure_go compiler_bootstrap",
+ "-tags=math_big_pure_go compiler_bootstrap purego",
}
if vflag > 0 {
cmd = append(cmd, "-v")
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index fdb7a085b0..db6372642a 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -195,11 +195,10 @@
// For example, when building with a non-standard configuration,
// use -pkgdir to keep generated packages in a separate location.
// -tags tag,list
-// a comma-separated list of build tags to consider satisfied during the
-// build. For more information about build tags, see the description of
-// build constraints in the documentation for the go/build package.
-// (Earlier versions of Go used a space-separated list, and that form
-// is deprecated but still recognized.)
+// a comma-separated list of additional build tags to consider satisfied
+// during the build. For more information about build tags, see
+// 'go help buildconstraint'. (Earlier versions of Go used a
+// space-separated list, and that form is deprecated but still recognized.)
// -trimpath
// remove all file system paths from the resulting executable.
// Instead of absolute file system paths, the recorded file names
@@ -931,6 +930,7 @@
//
// type Module struct {
// Path string // module path
+// Query string // version query corresponding to this version
// Version string // module version
// Versions []string // available module versions
// Replace *Module // replaced by this module
@@ -944,6 +944,8 @@
// Retracted []string // retraction information, if any (with -retracted or -u)
// Deprecated string // deprecation message, if any (with -u)
// Error *ModuleError // error loading module
+// Origin any // provenance of module
+// Reuse bool // reuse of old module info is safe
// }
//
// type ModuleError struct {
@@ -1020,6 +1022,16 @@
// module as a Module struct. If an error occurs, the result will
// be a Module struct with a non-nil Error field.
//
+// When using -m, the -reuse=old.json flag accepts the name of a file containing
+// the JSON output of a previous 'go list -m -json' invocation with the
+// same set of modifier flags (such as -u, -retracted, and -versions).
+// The go command may use this file to determine that a module is unchanged
+// since the previous invocation and avoid redownloading information about it.
+// Modules that are not redownloaded will be marked in the new output by
+// setting the Reuse field to true. Normally the module cache provides this
+// kind of reuse automatically; the -reuse flag can be useful on systems that
+// do not preserve the module cache.
+//
// For more about build flags, see 'go help build'.
//
// For more about specifying packages, see 'go help packages'.
@@ -1056,7 +1068,7 @@
//
// Usage:
//
-// go mod download [-x] [-json] [modules]
+// go mod download [-x] [-json] [-reuse=old.json] [modules]
//
// Download downloads the named modules, which can be module patterns selecting
// dependencies of the main module or module queries of the form path@version.
@@ -1079,6 +1091,7 @@
//
// type Module struct {
// Path string // module path
+// Query string // version query corresponding to this version
// Version string // module version
// Error string // error loading module
// Info string // absolute path to cached .info file
@@ -1087,8 +1100,18 @@
// Dir string // absolute path to cached source root directory
// Sum string // checksum for path, version (as in go.sum)
// GoModSum string // checksum for go.mod (as in go.sum)
+// Origin any // provenance of module
+// Reuse bool // reuse of old module info is safe
// }
//
+// The -reuse flag accepts the name of a file containing the JSON output of a
+// previous 'go mod download -json' invocation. The go command may use this
+// file to determine that a module is unchanged since the previous invocation
+// and avoid redownloading it. Modules that are not redownloaded will be marked
+// in the new output by setting the Reuse field to true. Normally the module
+// cache provides this kind of reuse automatically; the -reuse flag can be
+// useful on systems that do not preserve the module cache.
+//
// The -x flag causes download to print the commands download executes.
//
// See https://golang.org/ref/mod#go-mod-download for more about 'go mod download'.
@@ -1797,11 +1820,12 @@
//
// # Build constraints
//
-// A build constraint, also known as a build tag, is a line comment that begins
+// A build constraint, also known as a build tag, is a condition under which a
+// file should be included in the package. Build constraints are given by a
+// line comment that begins
//
// //go:build
//
-// that lists the conditions under which a file should be included in the package.
// Constraints may appear in any kind of source file (not just Go), but
// they must appear near the top of the file, preceded
// only by blank lines and other line comments. These rules mean that in Go
@@ -1810,9 +1834,9 @@
// To distinguish build constraints from package documentation,
// a build constraint should be followed by a blank line.
//
-// A build constraint is evaluated as an expression containing options
-// combined by ||, &&, and ! operators and parentheses. Operators have
-// the same meaning as in Go.
+// A build constraint comment is evaluated as an expression containing
+// build tags combined by ||, &&, and ! operators and parentheses.
+// Operators have the same meaning as in Go.
//
// For example, the following build constraint constrains a file to
// build when the "linux" and "386" constraints are satisfied, or when
@@ -1822,7 +1846,7 @@
//
// It is an error for a file to have more than one //go:build line.
//
-// During a particular build, the following words are satisfied:
+// During a particular build, the following build tags are satisfied:
//
// - the target operating system, as spelled by runtime.GOOS, set with the
// GOOS environment variable.
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index b39a62f3e4..a0082a3164 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -991,21 +991,6 @@ func TestIssue10952(t *testing.T) {
tg.run("get", "-d", "-u", importPath)
}
-func TestIssue16471(t *testing.T) {
- testenv.MustHaveExternalNetwork(t)
- testenv.MustHaveExecPath(t, "git")
-
- tg := testgo(t)
- defer tg.cleanup()
- tg.parallel()
- tg.tempDir("src")
- tg.setenv("GOPATH", tg.path("."))
- tg.must(os.MkdirAll(tg.path("src/rsc.io/go-get-issue-10952"), 0755))
- tg.runGit(tg.path("src/rsc.io"), "clone", "https://github.com/zombiezen/go-get-issue-10952")
- tg.runFail("get", "-u", "rsc.io/go-get-issue-10952")
- tg.grepStderr("rsc.io/go-get-issue-10952 is a custom import path for https://github.com/rsc/go-get-issue-10952, but .* is checked out from https://github.com/zombiezen/go-get-issue-10952", "did not detect updated import path")
-}
-
// Test git clone URL that uses SCP-like syntax and custom import path checking.
func TestIssue11457(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
@@ -1363,6 +1348,15 @@ func tempEnvName() string {
}
}
+func pathEnvName() string {
+ switch runtime.GOOS {
+ case "plan9":
+ return "path"
+ default:
+ return "PATH"
+ }
+}
+
func TestDefaultGOPATH(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
diff --git a/src/cmd/go/internal/fsys/fsys.go b/src/cmd/go/internal/fsys/fsys.go
index 41d0bbfe66..0d7bef9112 100644
--- a/src/cmd/go/internal/fsys/fsys.go
+++ b/src/cmd/go/internal/fsys/fsys.go
@@ -6,16 +6,65 @@ import (
"encoding/json"
"errors"
"fmt"
+ "internal/godebug"
"io/fs"
"io/ioutil"
+ "log"
"os"
+ pathpkg "path"
"path/filepath"
"runtime"
+ "runtime/debug"
"sort"
"strings"
+ "sync"
"time"
)
+// Trace emits a trace event for the operation and file path to the trace log,
+// but only when $GODEBUG contains gofsystrace=1.
+// The traces are appended to the file named by the $GODEBUG setting gofsystracelog, or else standard error.
+// For debugging, if the $GODEBUG setting gofsystracestack is non-empty, then trace events for paths
+// matching that glob pattern (using path.Match) will be followed by a full stack trace.
+func Trace(op, path string) {
+ if !doTrace {
+ return
+ }
+ traceMu.Lock()
+ defer traceMu.Unlock()
+ fmt.Fprintf(traceFile, "%d gofsystrace %s %s\n", os.Getpid(), op, path)
+ if traceStack != "" {
+ if match, _ := pathpkg.Match(traceStack, path); match {
+ traceFile.Write(debug.Stack())
+ }
+ }
+}
+
+var (
+ doTrace bool
+ traceStack string
+ traceFile *os.File
+ traceMu sync.Mutex
+)
+
+func init() {
+ if godebug.Get("gofsystrace") != "1" {
+ return
+ }
+ doTrace = true
+ traceStack = godebug.Get("gofsystracestack")
+ if f := godebug.Get("gofsystracelog"); f != "" {
+ // Note: No buffering on writes to this file, so no need to worry about closing it at exit.
+ var err error
+ traceFile, err = os.OpenFile(f, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ traceFile = os.Stderr
+ }
+}
+
// OverlayFile is the path to a text file in the OverlayJSON format.
// It is the value of the -overlay flag.
var OverlayFile string
@@ -86,6 +135,7 @@ func Init(wd string) error {
return nil
}
+ Trace("ReadFile", OverlayFile)
b, err := os.ReadFile(OverlayFile)
if err != nil {
return fmt.Errorf("reading overlay file: %v", err)
@@ -191,6 +241,7 @@ func initFromJSON(overlayJSON OverlayJSON) error {
// IsDir returns true if path is a directory on disk or in the
// overlay.
func IsDir(path string) (bool, error) {
+ Trace("IsDir", path)
path = canonicalize(path)
if _, ok := parentIsOverlayFile(path); ok {
@@ -260,6 +311,7 @@ func readDir(dir string) ([]fs.FileInfo, error) {
// ReadDir provides a slice of fs.FileInfo entries corresponding
// to the overlaid files in the directory.
func ReadDir(dir string) ([]fs.FileInfo, error) {
+ Trace("ReadDir", dir)
dir = canonicalize(dir)
if _, ok := parentIsOverlayFile(dir); ok {
return nil, &fs.PathError{Op: "ReadDir", Path: dir, Err: errNotDir}
@@ -327,11 +379,17 @@ func OverlayPath(path string) (string, bool) {
// Open opens the file at or overlaid on the given path.
func Open(path string) (*os.File, error) {
- return OpenFile(path, os.O_RDONLY, 0)
+ Trace("Open", path)
+ return openFile(path, os.O_RDONLY, 0)
}
// OpenFile opens the file at or overlaid on the given path with the flag and perm.
func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
+ Trace("OpenFile", path)
+ return openFile(path, flag, perm)
+}
+
+func openFile(path string, flag int, perm os.FileMode) (*os.File, error) {
cpath := canonicalize(path)
if node, ok := overlay[cpath]; ok {
// Opening a file in the overlay.
@@ -360,6 +418,7 @@ func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
// IsDirWithGoFiles reports whether dir is a directory containing Go files
// either on disk or in the overlay.
func IsDirWithGoFiles(dir string) (bool, error) {
+ Trace("IsDirWithGoFiles", dir)
fis, err := ReadDir(dir)
if os.IsNotExist(err) || errors.Is(err, errNotDir) {
return false, nil
@@ -405,28 +464,20 @@ func IsDirWithGoFiles(dir string) (bool, error) {
// walk recursively descends path, calling walkFn. Copied, with some
// modifications from path/filepath.walk.
func walk(path string, info fs.FileInfo, walkFn filepath.WalkFunc) error {
- if !info.IsDir() {
- return walkFn(path, info, nil)
+ if err := walkFn(path, info, nil); err != nil || !info.IsDir() {
+ return err
}
- fis, readErr := ReadDir(path)
- walkErr := walkFn(path, info, readErr)
- // If readErr != nil, walk can't walk into this directory.
- // walkErr != nil means walkFn want walk to skip this directory or stop walking.
- // Therefore, if one of readErr and walkErr isn't nil, walk will return.
- if readErr != nil || walkErr != nil {
- // The caller's behavior is controlled by the return value, which is decided
- // by walkFn. walkFn may ignore readErr and return nil.
- // If walkFn returns SkipDir, it will be handled by the caller.
- // So walk should return whatever walkFn returns.
- return walkErr
+ fis, err := ReadDir(path)
+ if err != nil {
+ return walkFn(path, info, err)
}
for _, fi := range fis {
filename := filepath.Join(path, fi.Name())
- if walkErr = walk(filename, fi, walkFn); walkErr != nil {
- if !fi.IsDir() || walkErr != filepath.SkipDir {
- return walkErr
+ if err := walk(filename, fi, walkFn); err != nil {
+ if !fi.IsDir() || err != filepath.SkipDir {
+ return err
}
}
}
@@ -436,6 +487,7 @@ func walk(path string, info fs.FileInfo, walkFn filepath.WalkFunc) error {
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root.
func Walk(root string, walkFn filepath.WalkFunc) error {
+ Trace("Walk", root)
info, err := Lstat(root)
if err != nil {
err = walkFn(root, nil, err)
@@ -450,11 +502,13 @@ func Walk(root string, walkFn filepath.WalkFunc) error {
// lstat implements a version of os.Lstat that operates on the overlay filesystem.
func Lstat(path string) (fs.FileInfo, error) {
+ Trace("Lstat", path)
return overlayStat(path, os.Lstat, "lstat")
}
// Stat implements a version of os.Stat that operates on the overlay filesystem.
func Stat(path string) (fs.FileInfo, error) {
+ Trace("Stat", path)
return overlayStat(path, os.Stat, "stat")
}
@@ -528,6 +582,7 @@ func (f fakeDir) Sys() any { return nil }
// Glob is like filepath.Glob but uses the overlay file system.
func Glob(pattern string) (matches []string, err error) {
+ Trace("Glob", pattern)
// Check pattern is well-formed.
if _, err := filepath.Match(pattern, ""); err != nil {
return nil, err
diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go
index 36bc4f28b7..c38c403006 100644
--- a/src/cmd/go/internal/help/helpdoc.go
+++ b/src/cmd/go/internal/help/helpdoc.go
@@ -812,11 +812,12 @@ var HelpBuildConstraint = &base.Command{
UsageLine: "buildconstraint",
Short: "build constraints",
Long: `
-A build constraint, also known as a build tag, is a line comment that begins
+A build constraint, also known as a build tag, is a condition under which a
+file should be included in the package. Build constraints are given by a
+line comment that begins
//go:build
-that lists the conditions under which a file should be included in the package.
Constraints may appear in any kind of source file (not just Go), but
they must appear near the top of the file, preceded
only by blank lines and other line comments. These rules mean that in Go
@@ -825,9 +826,9 @@ files a build constraint must appear before the package clause.
To distinguish build constraints from package documentation,
a build constraint should be followed by a blank line.
-A build constraint is evaluated as an expression containing options
-combined by ||, &&, and ! operators and parentheses. Operators have
-the same meaning as in Go.
+A build constraint comment is evaluated as an expression containing
+build tags combined by ||, &&, and ! operators and parentheses.
+Operators have the same meaning as in Go.
For example, the following build constraint constrains a file to
build when the "linux" and "386" constraints are satisfied, or when
@@ -837,7 +838,7 @@ build when the "linux" and "386" constraints are satisfied, or when
It is an error for a file to have more than one //go:build line.
-During a particular build, the following words are satisfied:
+During a particular build, the following build tags are satisfied:
- the target operating system, as spelled by runtime.GOOS, set with the
GOOS environment variable.
diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go
index 9c651f2bf3..5f8be6e3c9 100644
--- a/src/cmd/go/internal/list/list.go
+++ b/src/cmd/go/internal/list/list.go
@@ -223,6 +223,7 @@ applied to a Go struct, but now a Module struct:
type Module struct {
Path string // module path
+ Query string // version query corresponding to this version
Version string // module version
Versions []string // available module versions
Replace *Module // replaced by this module
@@ -236,6 +237,8 @@ applied to a Go struct, but now a Module struct:
Retracted []string // retraction information, if any (with -retracted or -u)
Deprecated string // deprecation message, if any (with -u)
Error *ModuleError // error loading module
+ Origin any // provenance of module
+ Reuse bool // reuse of old module info is safe
}
type ModuleError struct {
@@ -312,6 +315,16 @@ that must be a module path or query and returns the specified
module as a Module struct. If an error occurs, the result will
be a Module struct with a non-nil Error field.
+When using -m, the -reuse=old.json flag accepts the name of a file containing
+the JSON output of a previous 'go list -m -json' invocation with the
+same set of modifier flags (such as -u, -retracted, and -versions).
+The go command may use this file to determine that a module is unchanged
+since the previous invocation and avoid redownloading information about it.
+Modules that are not redownloaded will be marked in the new output by
+setting the Reuse field to true. Normally the module cache provides this
+kind of reuse automatically; the -reuse flag can be useful on systems that
+do not preserve the module cache.
+
For more about build flags, see 'go help build'.
For more about specifying packages, see 'go help packages'.
@@ -337,6 +350,7 @@ var (
listJsonFields jsonFlag // If not empty, only output these fields.
listM = CmdList.Flag.Bool("m", false, "")
listRetracted = CmdList.Flag.Bool("retracted", false, "")
+ listReuse = CmdList.Flag.String("reuse", "", "")
listTest = CmdList.Flag.Bool("test", false, "")
listU = CmdList.Flag.Bool("u", false, "")
listVersions = CmdList.Flag.Bool("versions", false, "")
@@ -398,6 +412,12 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
if *listFmt != "" && listJson == true {
base.Fatalf("go list -f cannot be used with -json")
}
+ if *listReuse != "" && !*listM {
+ base.Fatalf("go list -reuse cannot be used without -m")
+ }
+ if *listReuse != "" && modload.HasModRoot() {
+ base.Fatalf("go list -reuse cannot be used inside a module")
+ }
work.BuildInit()
out := newTrackingWriter(os.Stdout)
@@ -532,7 +552,10 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
mode |= modload.ListRetractedVersions
}
}
- mods, err := modload.ListModules(ctx, args, mode)
+ if *listReuse != "" && len(args) == 0 {
+ base.Fatalf("go: list -m -reuse only has an effect with module@version arguments")
+ }
+ mods, err := modload.ListModules(ctx, args, mode, *listReuse)
if !*listE {
for _, m := range mods {
if m.Error != nil {
@@ -783,7 +806,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
if *listRetracted {
mode |= modload.ListRetracted
}
- rmods, err := modload.ListModules(ctx, args, mode)
+ rmods, err := modload.ListModules(ctx, args, mode, *listReuse)
if err != nil && !*listE {
base.Errorf("go: %v", err)
}
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index fcb72b07b2..046f508545 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -877,7 +877,14 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo
if !cfg.ModulesEnabled {
buildMode = build.ImportComment
}
- if modroot := modload.PackageModRoot(ctx, r.path); modroot != "" {
+ modroot := modload.PackageModRoot(ctx, r.path)
+ if modroot == "" && str.HasPathPrefix(r.dir, cfg.GOROOTsrc) {
+ modroot = cfg.GOROOTsrc
+ if str.HasPathPrefix(r.dir, cfg.GOROOTsrc+string(filepath.Separator)+"cmd") {
+ modroot += string(filepath.Separator) + "cmd"
+ }
+ }
+ if modroot != "" {
if rp, err := modindex.GetPackage(modroot, r.dir); err == nil {
data.p, data.err = rp.Import(cfg.BuildContext, buildMode)
goto Happy
diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go
index 5bc6cbc4bb..0b50afb668 100644
--- a/src/cmd/go/internal/modcmd/download.go
+++ b/src/cmd/go/internal/modcmd/download.go
@@ -13,6 +13,7 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/modfetch"
+ "cmd/go/internal/modfetch/codehost"
"cmd/go/internal/modload"
"golang.org/x/mod/module"
@@ -20,7 +21,7 @@ import (
)
var cmdDownload = &base.Command{
- UsageLine: "go mod download [-x] [-json] [modules]",
+ UsageLine: "go mod download [-x] [-json] [-reuse=old.json] [modules]",
Short: "download modules to local cache",
Long: `
Download downloads the named modules, which can be module patterns selecting
@@ -44,6 +45,7 @@ corresponding to this Go struct:
type Module struct {
Path string // module path
+ Query string // version query corresponding to this version
Version string // module version
Error string // error loading module
Info string // absolute path to cached .info file
@@ -52,8 +54,18 @@ corresponding to this Go struct:
Dir string // absolute path to cached source root directory
Sum string // checksum for path, version (as in go.sum)
GoModSum string // checksum for go.mod (as in go.sum)
+ Origin any // provenance of module
+ Reuse bool // reuse of old module info is safe
}
+The -reuse flag accepts the name of a file containing the JSON output of a
+previous 'go mod download -json' invocation. The go command may use this
+file to determine that a module is unchanged since the previous invocation
+and avoid redownloading it. Modules that are not redownloaded will be marked
+in the new output by setting the Reuse field to true. Normally the module
+cache provides this kind of reuse automatically; the -reuse flag can be
+useful on systems that do not preserve the module cache.
+
The -x flag causes download to print the commands download executes.
See https://golang.org/ref/mod#go-mod-download for more about 'go mod download'.
@@ -62,7 +74,10 @@ See https://golang.org/ref/mod#version-queries for more about version queries.
`,
}
-var downloadJSON = cmdDownload.Flag.Bool("json", false, "")
+var (
+ downloadJSON = cmdDownload.Flag.Bool("json", false, "")
+ downloadReuse = cmdDownload.Flag.String("reuse", "", "")
+)
func init() {
cmdDownload.Run = runDownload // break init cycle
@@ -75,6 +90,7 @@ func init() {
type moduleJSON struct {
Path string `json:",omitempty"`
Version string `json:",omitempty"`
+ Query string `json:",omitempty"`
Error string `json:",omitempty"`
Info string `json:",omitempty"`
GoMod string `json:",omitempty"`
@@ -82,6 +98,9 @@ type moduleJSON struct {
Dir string `json:",omitempty"`
Sum string `json:",omitempty"`
GoModSum string `json:",omitempty"`
+
+ Origin *codehost.Origin `json:",omitempty"`
+ Reuse bool `json:",omitempty"`
}
func runDownload(ctx context.Context, cmd *base.Command, args []string) {
@@ -148,12 +167,12 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
}
downloadModule := func(m *moduleJSON) {
- var err error
- m.Info, err = modfetch.InfoFile(m.Path, m.Version)
+ _, file, err := modfetch.InfoFile(m.Path, m.Version)
if err != nil {
m.Error = err.Error()
return
}
+ m.Info = file
m.GoMod, err = modfetch.GoModFile(m.Path, m.Version)
if err != nil {
m.Error = err.Error()
@@ -179,15 +198,21 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
}
var mods []*moduleJSON
+
+ if *downloadReuse != "" && modload.HasModRoot() {
+ base.Fatalf("go mod download -reuse cannot be used inside a module")
+ }
+
type token struct{}
sem := make(chan token, runtime.GOMAXPROCS(0))
- infos, infosErr := modload.ListModules(ctx, args, 0)
- if !haveExplicitArgs {
+ infos, infosErr := modload.ListModules(ctx, args, 0, *downloadReuse)
+ if !haveExplicitArgs && modload.WorkFilePath() == "" {
// 'go mod download' is sometimes run without arguments to pre-populate the
- // module cache. It may fetch modules that aren't needed to build packages
- // in the main module. This is usually not intended, so don't save sums for
- // downloaded modules (golang.org/issue/45332). We do still fix
- // inconsistencies in go.mod though.
+ // module cache. In modules that aren't at go 1.17 or higher, it may fetch
+ // modules that aren't needed to build packages in the main module. This is
+ // usually not intended, so don't save sums for downloaded modules
+ // (golang.org/issue/45332). We do still fix inconsistencies in go.mod
+ // though.
//
// TODO(#45551): In the future, report an error if go.mod or go.sum need to
// be updated after loading the build list. This may require setting
@@ -209,12 +234,18 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
m := &moduleJSON{
Path: info.Path,
Version: info.Version,
+ Query: info.Query,
+ Reuse: info.Reuse,
+ Origin: info.Origin,
}
mods = append(mods, m)
if info.Error != nil {
m.Error = info.Error.Err
continue
}
+ if m.Reuse {
+ continue
+ }
sem <- token{}
go func() {
downloadModule(m)
@@ -252,8 +283,19 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
// 'go get mod@version', which may have other side effects. We print this in
// some error message hints.
//
- // Don't save sums for 'go mod download' without arguments; see comment above.
- if haveExplicitArgs {
+ // If we're in workspace mode, update go.work.sum with checksums for all of
+ // the modules we downloaded that aren't already recorded. Since a requirement
+ // in one module may upgrade a dependency of another, we can't be sure that
+ // the import graph matches the import graph of any given module in isolation,
+ // so we may end up needing to load packages from modules that wouldn't
+ // otherwise be relevant.
+ //
+ // TODO(#44435): If we adjust the set of modules downloaded in workspace mode,
+ // we may also need to adjust the logic for saving checksums here.
+ //
+ // Don't save sums for 'go mod download' without arguments unless we're in
+ // workspace mode; see comment above.
+ if haveExplicitArgs || modload.WorkFilePath() != "" {
if err := modload.WriteGoMod(ctx); err != nil {
base.Errorf("go: %v", err)
}
diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go
index 2d3f1eb05b..8e929a0001 100644
--- a/src/cmd/go/internal/modcmd/why.go
+++ b/src/cmd/go/internal/modcmd/why.go
@@ -82,7 +82,7 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) {
}
}
- mods, err := modload.ListModules(ctx, args, 0)
+ mods, err := modload.ListModules(ctx, args, 0, "")
if err != nil {
base.Fatalf("go: %v", err)
}
diff --git a/src/cmd/go/internal/modfetch/cache.go b/src/cmd/go/internal/modfetch/cache.go
index b0dae1cb3d..c1ed18736c 100644
--- a/src/cmd/go/internal/modfetch/cache.go
+++ b/src/cmd/go/internal/modfetch/cache.go
@@ -164,7 +164,7 @@ func SideLock() (unlock func(), err error) {
}
// A cachingRepo is a cache around an underlying Repo,
-// avoiding redundant calls to ModulePath, Versions, Stat, Latest, and GoMod (but not Zip).
+// avoiding redundant calls to ModulePath, Versions, Stat, Latest, and GoMod (but not CheckReuse or Zip).
// It is also safe for simultaneous use by multiple goroutines
// (so that it can be returned from Lookup multiple times).
// It serializes calls to the underlying Repo.
@@ -195,24 +195,32 @@ func (r *cachingRepo) repo() Repo {
return r.r
}
+func (r *cachingRepo) CheckReuse(old *codehost.Origin) error {
+ return r.repo().CheckReuse(old)
+}
+
func (r *cachingRepo) ModulePath() string {
return r.path
}
-func (r *cachingRepo) Versions(prefix string) ([]string, error) {
+func (r *cachingRepo) Versions(prefix string) (*Versions, error) {
type cached struct {
- list []string
- err error
+ v *Versions
+ err error
}
c := r.cache.Do("versions:"+prefix, func() any {
- list, err := r.repo().Versions(prefix)
- return cached{list, err}
+ v, err := r.repo().Versions(prefix)
+ return cached{v, err}
}).(cached)
if c.err != nil {
return nil, c.err
}
- return append([]string(nil), c.list...), nil
+ v := &Versions{
+ Origin: c.v.Origin,
+ List: append([]string(nil), c.v.List...),
+ }
+ return v, nil
}
type cachedInfo struct {
@@ -245,11 +253,12 @@ func (r *cachingRepo) Stat(rev string) (*RevInfo, error) {
return cachedInfo{info, err}
}).(cachedInfo)
- if c.err != nil {
- return nil, c.err
+ info := c.info
+ if info != nil {
+ copy := *info
+ info = &copy
}
- info := *c.info
- return &info, nil
+ return info, c.err
}
func (r *cachingRepo) Latest() (*RevInfo, error) {
@@ -269,11 +278,12 @@ func (r *cachingRepo) Latest() (*RevInfo, error) {
return cachedInfo{info, err}
}).(cachedInfo)
- if c.err != nil {
- return nil, c.err
+ info := c.info
+ if info != nil {
+ copy := *info
+ info = &copy
}
- info := *c.info
- return &info, nil
+ return info, c.err
}
func (r *cachingRepo) GoMod(version string) ([]byte, error) {
@@ -310,31 +320,41 @@ func (r *cachingRepo) Zip(dst io.Writer, version string) error {
return r.repo().Zip(dst, version)
}
-// InfoFile is like Lookup(path).Stat(version) but returns the name of the file
+// InfoFile is like Lookup(path).Stat(version) but also returns the name of the file
// containing the cached information.
-func InfoFile(path, version string) (string, error) {
+func InfoFile(path, version string) (*RevInfo, string, error) {
if !semver.IsValid(version) {
- return "", fmt.Errorf("invalid version %q", version)
+ return nil, "", fmt.Errorf("invalid version %q", version)
}
- if file, _, err := readDiskStat(path, version); err == nil {
- return file, nil
+ if file, info, err := readDiskStat(path, version); err == nil {
+ return info, file, nil
}
+ var info *RevInfo
+ var err2info map[error]*RevInfo
err := TryProxies(func(proxy string) error {
- _, err := Lookup(proxy, path).Stat(version)
+ i, err := Lookup(proxy, path).Stat(version)
+ if err == nil {
+ info = i
+ } else {
+ if err2info == nil {
+ err2info = make(map[error]*RevInfo)
+ }
+ err2info[err] = info
+ }
return err
})
if err != nil {
- return "", err
+ return err2info[err], "", err
}
// Stat should have populated the disk cache for us.
file, err := CachePath(module.Version{Path: path, Version: version}, "info")
if err != nil {
- return "", err
+ return nil, "", err
}
- return file, nil
+ return info, file, nil
}
// GoMod is like Lookup(path).GoMod(rev) but avoids the
@@ -561,6 +581,26 @@ func writeDiskStat(file string, info *RevInfo) error {
if file == "" {
return nil
}
+
+ if info.Origin != nil {
+ // Clean the origin information, which might have too many
+ // validation criteria, for example if we are saving the result of
+ // m@master as m@pseudo-version.
+ clean := *info
+ info = &clean
+ o := *info.Origin
+ info.Origin = &o
+
+ // Tags never matter if you are starting with a semver version,
+ // as we would be when finding this cache entry.
+ o.TagSum = ""
+ o.TagPrefix = ""
+ // Ref doesn't matter if you have a pseudoversion.
+ if module.IsPseudoVersion(info.Version) {
+ o.Ref = ""
+ }
+ }
+
js, err := json.Marshal(info)
if err != nil {
return err
diff --git a/src/cmd/go/internal/modfetch/codehost/codehost.go b/src/cmd/go/internal/modfetch/codehost/codehost.go
index e08a84b32c..747022759e 100644
--- a/src/cmd/go/internal/modfetch/codehost/codehost.go
+++ b/src/cmd/go/internal/modfetch/codehost/codehost.go
@@ -22,6 +22,9 @@ import (
"cmd/go/internal/cfg"
"cmd/go/internal/lockedfile"
"cmd/go/internal/str"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
)
// Downloaded size limits.
@@ -36,8 +39,15 @@ const (
// remote version control servers, and code hosting sites.
// A Repo must be safe for simultaneous use by multiple goroutines.
type Repo interface {
+ // CheckReuse checks whether the old origin information
+ // remains up to date. If so, whatever cached object it was
+ // taken from can be reused.
+ // The subdir gives the subdirectory name where the module root is expected to be found,
+ // "" for the root or "sub/dir" for a subdirectory (no trailing slash).
+ CheckReuse(old *Origin, subdir string) error
+
// List lists all tags with the given prefix.
- Tags(prefix string) (tags []string, err error)
+ Tags(prefix string) (*Tags, error)
// Stat returns information about the revision rev.
// A revision can be any identifier known to the underlying service:
@@ -74,8 +84,88 @@ type Repo interface {
DescendsFrom(rev, tag string) (bool, error)
}
-// A Rev describes a single revision in a source code repository.
+// An Origin describes the provenance of a given repo method result.
+// It can be passed to CheckReuse (usually in a different go command invocation)
+// to see whether the result remains up-to-date.
+type Origin struct {
+ VCS string `json:",omitempty"` // "git" etc
+ URL string `json:",omitempty"` // URL of repository
+ Subdir string `json:",omitempty"` // subdirectory in repo
+
+ // If TagSum is non-empty, then the resolution of this module version
+ // depends on the set of tags present in the repo, specifically the tags
+ // of the form TagPrefix + a valid semver version.
+ // If the matching repo tags and their commit hashes still hash to TagSum,
+ // the Origin is still valid (at least as far as the tags are concerned).
+ // The exact checksum is up to the Repo implementation; see (*gitRepo).Tags.
+ TagPrefix string `json:",omitempty"`
+ TagSum string `json:",omitempty"`
+
+ // If Ref is non-empty, then the resolution of this module version
+ // depends on Ref resolving to the revision identified by Hash.
+ // If Ref still resolves to Hash, the Origin is still valid (at least as far as Ref is concerned).
+ // For Git, the Ref is a full ref like "refs/heads/main" or "refs/tags/v1.2.3",
+ // and the Hash is the Git object hash the ref maps to.
+ // Other VCS might choose differently, but the idea is that Ref is the name
+ // with a mutable meaning while Hash is a name with an immutable meaning.
+ Ref string `json:",omitempty"`
+ Hash string `json:",omitempty"`
+
+ // If RepoSum is non-empty, then the resolution of this module version
+ // failed due to the repo being available but the version not being present.
+ // This depends on the entire state of the repo, which RepoSum summarizes.
+ // For Git, this is a hash of all the refs and their hashes.
+ RepoSum string `json:",omitempty"`
+}
+
+// Checkable reports whether the Origin contains anything that can be checked.
+// If not, the Origin is purely informational and should fail a CheckReuse call.
+func (o *Origin) Checkable() bool {
+ return o.TagSum != "" || o.Ref != "" || o.Hash != "" || o.RepoSum != ""
+}
+
+// ClearCheckable clears the Origin enough to make Checkable return false.
+func (o *Origin) ClearCheckable() {
+ o.TagSum = ""
+ o.TagPrefix = ""
+ o.Ref = ""
+ o.Hash = ""
+ o.RepoSum = ""
+}
+
+// A Tags describes the available tags in a code repository.
+type Tags struct {
+ Origin *Origin
+ List []Tag
+}
+
+// A Tag describes a single tag in a code repository.
+type Tag struct {
+ Name string
+ Hash string // content hash identifying tag's content, if available
+}
+
+// isOriginTag reports whether tag should be preserved
+// in the Tags method's Origin calculation.
+// We can safely ignore tags that do not look like pseudo-versions,
+// because ../coderepo.go's (*codeRepo).Versions ignores them too.
+// We can also ignore non-semver tags, but we have to include semver
+// tags with extra suffixes, because the pseudo-version base finder uses them.
+func isOriginTag(tag string) bool {
+ // modfetch.(*codeRepo).Versions uses Canonical == tag,
+ // but pseudo-version calculation has a weaker condition that
+ // the canonical is a prefix of the tag.
+ // Include those too, so that if any new one appears, we'll invalidate the cache entry.
+ // This will lead to spurious invalidation of version list results,
+ // but tags of this form being created should be fairly rare
+ // (and invalidate pseudo-version results anyway).
+ c := semver.Canonical(tag)
+ return c != "" && strings.HasPrefix(tag, c) && !module.IsPseudoVersion(tag)
+}
+
+// A RevInfo describes a single revision in a source code repository.
type RevInfo struct {
+ Origin *Origin
Name string // complete ID in underlying repository
Short string // shortened ID, for use in pseudo-version
Version string // version used in lookup
@@ -157,7 +247,7 @@ func WorkDir(typ, name string) (dir, lockfile string, err error) {
lockfile = dir + ".lock"
if cfg.BuildX {
- fmt.Fprintf(os.Stderr, "# lock %s", lockfile)
+ fmt.Fprintf(os.Stderr, "# lock %s\n", lockfile)
}
unlock, err := lockedfile.MutexAt(lockfile).Lock()
diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go
index 034abf360b..35f77e870e 100644
--- a/src/cmd/go/internal/modfetch/codehost/git.go
+++ b/src/cmd/go/internal/modfetch/codehost/git.go
@@ -6,6 +6,8 @@ package codehost
import (
"bytes"
+ "crypto/sha256"
+ "encoding/base64"
"errors"
"fmt"
"io"
@@ -169,6 +171,57 @@ func (r *gitRepo) loadLocalTags() {
}
}
+func (r *gitRepo) CheckReuse(old *Origin, subdir string) error {
+ if old == nil {
+ return fmt.Errorf("missing origin")
+ }
+ if old.VCS != "git" || old.URL != r.remoteURL {
+ return fmt.Errorf("origin moved from %v %q to %v %q", old.VCS, old.URL, "git", r.remoteURL)
+ }
+ if old.Subdir != subdir {
+ return fmt.Errorf("origin moved from %v %q %q to %v %q %q", old.VCS, old.URL, old.Subdir, "git", r.remoteURL, subdir)
+ }
+
+ // Note: Can have Hash with no Ref and no TagSum and no RepoSum,
+ // meaning the Hash simply has to remain in the repo.
+ // In that case we assume it does in the absence of any real way to check.
+ // But if neither Hash nor TagSum is present, we have nothing to check,
+ // which we take to mean we didn't record enough information to be sure.
+ if old.Hash == "" && old.TagSum == "" && old.RepoSum == "" {
+ return fmt.Errorf("non-specific origin")
+ }
+
+ r.loadRefs()
+ if r.refsErr != nil {
+ return r.refsErr
+ }
+
+ if old.Ref != "" {
+ hash, ok := r.refs[old.Ref]
+ if !ok {
+ return fmt.Errorf("ref %q deleted", old.Ref)
+ }
+ if hash != old.Hash {
+ return fmt.Errorf("ref %q moved from %s to %s", old.Ref, old.Hash, hash)
+ }
+ }
+ if old.TagSum != "" {
+ tags, err := r.Tags(old.TagPrefix)
+ if err != nil {
+ return err
+ }
+ if tags.Origin.TagSum != old.TagSum {
+ return fmt.Errorf("tags changed")
+ }
+ }
+ if old.RepoSum != "" {
+ if r.repoSum(r.refs) != old.RepoSum {
+ return fmt.Errorf("refs changed")
+ }
+ }
+ return nil
+}
+
// loadRefs loads heads and tags references from the remote into the map r.refs.
// The result is cached in memory.
func (r *gitRepo) loadRefs() (map[string]string, error) {
@@ -219,14 +272,21 @@ func (r *gitRepo) loadRefs() (map[string]string, error) {
return r.refs, r.refsErr
}
-func (r *gitRepo) Tags(prefix string) ([]string, error) {
+func (r *gitRepo) Tags(prefix string) (*Tags, error) {
refs, err := r.loadRefs()
if err != nil {
return nil, err
}
- tags := []string{}
- for ref := range refs {
+ tags := &Tags{
+ Origin: &Origin{
+ VCS: "git",
+ URL: r.remoteURL,
+ TagPrefix: prefix,
+ },
+ List: []Tag{},
+ }
+ for ref, hash := range refs {
if !strings.HasPrefix(ref, "refs/tags/") {
continue
}
@@ -234,12 +294,52 @@ func (r *gitRepo) Tags(prefix string) ([]string, error) {
if !strings.HasPrefix(tag, prefix) {
continue
}
- tags = append(tags, tag)
+ tags.List = append(tags.List, Tag{tag, hash})
}
- sort.Strings(tags)
+ sort.Slice(tags.List, func(i, j int) bool {
+ return tags.List[i].Name < tags.List[j].Name
+ })
+
+ dir := prefix[:strings.LastIndex(prefix, "/")+1]
+ h := sha256.New()
+ for _, tag := range tags.List {
+ if isOriginTag(strings.TrimPrefix(tag.Name, dir)) {
+ fmt.Fprintf(h, "%q %s\n", tag.Name, tag.Hash)
+ }
+ }
+ tags.Origin.TagSum = "t1:" + base64.StdEncoding.EncodeToString(h.Sum(nil))
return tags, nil
}
+// repoSum returns a checksum of the entire repo state,
+// which can be checked (as Origin.RepoSum) to cache
+// the absence of a specific module version.
+// The caller must supply refs, the result of a successful r.loadRefs.
+func (r *gitRepo) repoSum(refs map[string]string) string {
+ var list []string
+ for ref := range refs {
+ list = append(list, ref)
+ }
+ sort.Strings(list)
+ h := sha256.New()
+ for _, ref := range list {
+ fmt.Fprintf(h, "%q %s\n", ref, refs[ref])
+ }
+ return "r1:" + base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+// unknownRevisionInfo returns a RevInfo containing an Origin containing a RepoSum of refs,
+// for use when returning an UnknownRevisionError.
+func (r *gitRepo) unknownRevisionInfo(refs map[string]string) *RevInfo {
+ return &RevInfo{
+ Origin: &Origin{
+ VCS: "git",
+ URL: r.remoteURL,
+ RepoSum: r.repoSum(refs),
+ },
+ }
+}
+
func (r *gitRepo) Latest() (*RevInfo, error) {
refs, err := r.loadRefs()
if err != nil {
@@ -248,7 +348,13 @@ func (r *gitRepo) Latest() (*RevInfo, error) {
if refs["HEAD"] == "" {
return nil, ErrNoCommits
}
- return r.Stat(refs["HEAD"])
+ info, err := r.Stat(refs["HEAD"])
+ if err != nil {
+ return nil, err
+ }
+ info.Origin.Ref = "HEAD"
+ info.Origin.Hash = refs["HEAD"]
+ return info, nil
}
// findRef finds some ref name for the given hash,
@@ -278,7 +384,7 @@ const minHashDigits = 7
// stat stats the given rev in the local repository,
// or else it fetches more info from the remote repository and tries again.
-func (r *gitRepo) stat(rev string) (*RevInfo, error) {
+func (r *gitRepo) stat(rev string) (info *RevInfo, err error) {
if r.local {
return r.statLocal(rev, rev)
}
@@ -345,9 +451,19 @@ func (r *gitRepo) stat(rev string) (*RevInfo, error) {
hash = rev
}
} else {
- return nil, &UnknownRevisionError{Rev: rev}
+ return r.unknownRevisionInfo(refs), &UnknownRevisionError{Rev: rev}
}
+ defer func() {
+ if info != nil {
+ info.Origin.Hash = info.Name
+ // There's a ref = hash below; don't write that hash down as Origin.Ref.
+ if ref != info.Origin.Hash {
+ info.Origin.Ref = ref
+ }
+ }
+ }()
+
// Protect r.fetchLevel and the "fetch more and more" sequence.
unlock, err := r.mu.Lock()
if err != nil {
@@ -449,7 +565,12 @@ func (r *gitRepo) fetchRefsLocked() error {
func (r *gitRepo) statLocal(version, rev string) (*RevInfo, error) {
out, err := Run(r.dir, "git", "-c", "log.showsignature=false", "log", "--no-decorate", "-n1", "--format=format:%H %ct %D", rev, "--")
if err != nil {
- return nil, &UnknownRevisionError{Rev: rev}
+ // Return info with Origin.RepoSum if possible to allow caching of negative lookup.
+ var info *RevInfo
+ if refs, err := r.loadRefs(); err == nil {
+ info = r.unknownRevisionInfo(refs)
+ }
+ return info, &UnknownRevisionError{Rev: rev}
}
f := strings.Fields(string(out))
if len(f) < 2 {
@@ -465,11 +586,19 @@ func (r *gitRepo) statLocal(version, rev string) (*RevInfo, error) {
}
info := &RevInfo{
+ Origin: &Origin{
+ VCS: "git",
+ URL: r.remoteURL,
+ Hash: hash,
+ },
Name: hash,
Short: ShortenSHA1(hash),
Time: time.Unix(t, 0).UTC(),
Version: hash,
}
+ if !strings.HasPrefix(hash, rev) {
+ info.Origin.Ref = rev
+ }
// Add tags. Output looks like:
// ede458df7cd0fdca520df19a33158086a8a68e81 1523994202 HEAD -> master, tag: v1.2.4-annotated, tag: v1.2.3, origin/master, origin/HEAD
@@ -580,7 +709,7 @@ func (r *gitRepo) RecentTag(rev, prefix string, allowed func(tag string) bool) (
if err != nil {
return "", err
}
- if len(tags) == 0 {
+ if len(tags.List) == 0 {
return "", nil
}
@@ -634,7 +763,7 @@ func (r *gitRepo) DescendsFrom(rev, tag string) (bool, error) {
if err != nil {
return false, err
}
- if len(tags) == 0 {
+ if len(tags.List) == 0 {
return false, nil
}
diff --git a/src/cmd/go/internal/modfetch/codehost/git_test.go b/src/cmd/go/internal/modfetch/codehost/git_test.go
index a684fa1a9b..6a4212fc5a 100644
--- a/src/cmd/go/internal/modfetch/codehost/git_test.go
+++ b/src/cmd/go/internal/modfetch/codehost/git_test.go
@@ -43,7 +43,7 @@ var altRepos = []string{
// For now, at least the hgrepo1 tests check the general vcs.go logic.
// localGitRepo is like gitrepo1 but allows archive access.
-var localGitRepo string
+var localGitRepo, localGitURL string
func testMain(m *testing.M) int {
dir, err := os.MkdirTemp("", "gitrepo-test-")
@@ -65,6 +65,15 @@ func testMain(m *testing.M) int {
if _, err := Run(localGitRepo, "git", "config", "daemon.uploadarch", "true"); err != nil {
log.Fatal(err)
}
+
+ // Convert absolute path to file URL. LocalGitRepo will not accept
+ // Windows absolute paths because they look like a host:path remote.
+ // TODO(golang.org/issue/32456): use url.FromFilePath when implemented.
+ if strings.HasPrefix(localGitRepo, "/") {
+ localGitURL = "file://" + localGitRepo
+ } else {
+ localGitURL = "file:///" + filepath.ToSlash(localGitRepo)
+ }
}
}
@@ -73,17 +82,8 @@ func testMain(m *testing.M) int {
func testRepo(t *testing.T, remote string) (Repo, error) {
if remote == "localGitRepo" {
- // Convert absolute path to file URL. LocalGitRepo will not accept
- // Windows absolute paths because they look like a host:path remote.
- // TODO(golang.org/issue/32456): use url.FromFilePath when implemented.
- var url string
- if strings.HasPrefix(localGitRepo, "/") {
- url = "file://" + localGitRepo
- } else {
- url = "file:///" + filepath.ToSlash(localGitRepo)
- }
testenv.MustHaveExecPath(t, "git")
- return LocalGitRepo(url)
+ return LocalGitRepo(localGitURL)
}
vcs := "git"
for _, k := range []string{"hg"} {
@@ -98,13 +98,28 @@ func testRepo(t *testing.T, remote string) (Repo, error) {
var tagsTests = []struct {
repo string
prefix string
- tags []string
+ tags []Tag
}{
- {gitrepo1, "xxx", []string{}},
- {gitrepo1, "", []string{"v1.2.3", "v1.2.4-annotated", "v2.0.1", "v2.0.2", "v2.3"}},
- {gitrepo1, "v", []string{"v1.2.3", "v1.2.4-annotated", "v2.0.1", "v2.0.2", "v2.3"}},
- {gitrepo1, "v1", []string{"v1.2.3", "v1.2.4-annotated"}},
- {gitrepo1, "2", []string{}},
+ {gitrepo1, "xxx", []Tag{}},
+ {gitrepo1, "", []Tag{
+ {"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ {"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ {"v2.0.1", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
+ {"v2.0.2", "9d02800338b8a55be062c838d1f02e0c5780b9eb"},
+ {"v2.3", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
+ }},
+ {gitrepo1, "v", []Tag{
+ {"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ {"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ {"v2.0.1", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
+ {"v2.0.2", "9d02800338b8a55be062c838d1f02e0c5780b9eb"},
+ {"v2.3", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
+ }},
+ {gitrepo1, "v1", []Tag{
+ {"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ {"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"},
+ }},
+ {gitrepo1, "2", []Tag{}},
}
func TestTags(t *testing.T) {
@@ -121,13 +136,24 @@ func TestTags(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if !reflect.DeepEqual(tags, tt.tags) {
- t.Errorf("Tags: incorrect tags\nhave %v\nwant %v", tags, tt.tags)
+ if tags == nil || !reflect.DeepEqual(tags.List, tt.tags) {
+ t.Errorf("Tags(%q): incorrect tags\nhave %v\nwant %v", tt.prefix, tags, tt.tags)
}
}
t.Run(path.Base(tt.repo)+"/"+tt.prefix, f)
if tt.repo == gitrepo1 {
+ // Clear hashes.
+ clearTags := []Tag{}
+ for _, tag := range tt.tags {
+ clearTags = append(clearTags, Tag{tag.Name, ""})
+ }
+ tags := tt.tags
for _, tt.repo = range altRepos {
+ if strings.Contains(tt.repo, "Git") {
+ tt.tags = tags
+ } else {
+ tt.tags = clearTags
+ }
t.Run(path.Base(tt.repo)+"/"+tt.prefix, f)
}
}
@@ -141,6 +167,12 @@ var latestTests = []struct {
{
gitrepo1,
&RevInfo{
+ Origin: &Origin{
+ VCS: "git",
+ URL: "https://vcs-test.golang.org/git/gitrepo1",
+ Ref: "HEAD",
+ Hash: "ede458df7cd0fdca520df19a33158086a8a68e81",
+ },
Name: "ede458df7cd0fdca520df19a33158086a8a68e81",
Short: "ede458df7cd0",
Version: "ede458df7cd0fdca520df19a33158086a8a68e81",
@@ -151,6 +183,11 @@ var latestTests = []struct {
{
hgrepo1,
&RevInfo{
+ Origin: &Origin{
+ VCS: "hg",
+ URL: "https://vcs-test.golang.org/hg/hgrepo1",
+ Hash: "18518c07eb8ed5c80221e997e518cccaa8c0c287",
+ },
Name: "18518c07eb8ed5c80221e997e518cccaa8c0c287",
Short: "18518c07eb8e",
Version: "18518c07eb8ed5c80221e997e518cccaa8c0c287",
@@ -174,12 +211,17 @@ func TestLatest(t *testing.T) {
t.Fatal(err)
}
if !reflect.DeepEqual(info, tt.info) {
- t.Errorf("Latest: incorrect info\nhave %+v\nwant %+v", *info, *tt.info)
+ t.Errorf("Latest: incorrect info\nhave %+v (origin %+v)\nwant %+v (origin %+v)", info, info.Origin, tt.info, tt.info.Origin)
}
}
t.Run(path.Base(tt.repo), f)
if tt.repo == gitrepo1 {
tt.repo = "localGitRepo"
+ info := *tt.info
+ tt.info = &info
+ o := *info.Origin
+ info.Origin = &o
+ o.URL = localGitURL
t.Run(path.Base(tt.repo), f)
}
}
@@ -590,11 +632,12 @@ func TestStat(t *testing.T) {
if !strings.Contains(err.Error(), tt.err) {
t.Fatalf("Stat: wrong error %q, want %q", err, tt.err)
}
- if info != nil {
- t.Errorf("Stat: non-nil info with error %q", err)
+ if info != nil && info.Origin == nil {
+ t.Errorf("Stat: non-nil info with nil Origin with error %q", err)
}
return
}
+ info.Origin = nil // TestLatest and ../../../testdata/script/reuse_git.txt test Origin well enough
if !reflect.DeepEqual(info, tt.info) {
t.Errorf("Stat: incorrect info\nhave %+v\nwant %+v", *info, *tt.info)
}
diff --git a/src/cmd/go/internal/modfetch/codehost/vcs.go b/src/cmd/go/internal/modfetch/codehost/vcs.go
index de62265efc..f1c40998b2 100644
--- a/src/cmd/go/internal/modfetch/codehost/vcs.go
+++ b/src/cmd/go/internal/modfetch/codehost/vcs.go
@@ -290,7 +290,13 @@ func (r *vcsRepo) loadBranches() {
}
}
-func (r *vcsRepo) Tags(prefix string) ([]string, error) {
+var ErrNoRepoHash = errors.New("RepoHash not supported")
+
+func (r *vcsRepo) CheckReuse(old *Origin, subdir string) error {
+ return fmt.Errorf("vcs %s does not implement CheckReuse", r.cmd.vcs)
+}
+
+func (r *vcsRepo) Tags(prefix string) (*Tags, error) {
unlock, err := r.mu.Lock()
if err != nil {
return nil, err
@@ -298,14 +304,24 @@ func (r *vcsRepo) Tags(prefix string) ([]string, error) {
defer unlock()
r.tagsOnce.Do(r.loadTags)
-
- tags := []string{}
+ tags := &Tags{
+ // None of the other VCS provide a reasonable way to compute TagSum
+ // without downloading the whole repo, so we only include VCS and URL
+ // in the Origin.
+ Origin: &Origin{
+ VCS: r.cmd.vcs,
+ URL: r.remote,
+ },
+ List: []Tag{},
+ }
for tag := range r.tags {
if strings.HasPrefix(tag, prefix) {
- tags = append(tags, tag)
+ tags.List = append(tags.List, Tag{tag, ""})
}
}
- sort.Strings(tags)
+ sort.Slice(tags.List, func(i, j int) bool {
+ return tags.List[i].Name < tags.List[j].Name
+ })
return tags, nil
}
@@ -352,7 +368,16 @@ func (r *vcsRepo) statLocal(rev string) (*RevInfo, error) {
if err != nil {
return nil, &UnknownRevisionError{Rev: rev}
}
- return r.cmd.parseStat(rev, string(out))
+ info, err := r.cmd.parseStat(rev, string(out))
+ if err != nil {
+ return nil, err
+ }
+ if info.Origin == nil {
+ info.Origin = new(Origin)
+ }
+ info.Origin.VCS = r.cmd.vcs
+ info.Origin.URL = r.remote
+ return info, nil
}
func (r *vcsRepo) Latest() (*RevInfo, error) {
@@ -491,6 +516,9 @@ func hgParseStat(rev, out string) (*RevInfo, error) {
sort.Strings(tags)
info := &RevInfo{
+ Origin: &Origin{
+ Hash: hash,
+ },
Name: hash,
Short: ShortenSHA1(hash),
Time: time.Unix(t, 0).UTC(),
@@ -569,6 +597,9 @@ func fossilParseStat(rev, out string) (*RevInfo, error) {
version = hash // extend to full hash
}
info := &RevInfo{
+ Origin: &Origin{
+ Hash: hash,
+ },
Name: hash,
Short: ShortenSHA1(hash),
Time: t,
diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go
index ff1cea1d94..8fb0035f8c 100644
--- a/src/cmd/go/internal/modfetch/coderepo.go
+++ b/src/cmd/go/internal/modfetch/coderepo.go
@@ -130,12 +130,16 @@ func (r *codeRepo) ModulePath() string {
return r.modPath
}
-func (r *codeRepo) Versions(prefix string) ([]string, error) {
+func (r *codeRepo) CheckReuse(old *codehost.Origin) error {
+ return r.code.CheckReuse(old, r.codeDir)
+}
+
+func (r *codeRepo) Versions(prefix string) (*Versions, error) {
// Special case: gopkg.in/macaroon-bakery.v2-unstable
// does not use the v2 tags (those are for macaroon-bakery.v2).
// It has no possible tags at all.
if strings.HasPrefix(r.modPath, "gopkg.in/") && strings.HasSuffix(r.modPath, "-unstable") {
- return nil, nil
+ return &Versions{}, nil
}
p := prefix
@@ -149,16 +153,21 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) {
Err: err,
}
}
+ if tags.Origin != nil {
+ tags.Origin.Subdir = r.codeDir
+ }
var list, incompatible []string
- for _, tag := range tags {
- if !strings.HasPrefix(tag, p) {
+ for _, tag := range tags.List {
+ if !strings.HasPrefix(tag.Name, p) {
continue
}
- v := tag
+ v := tag.Name
if r.codeDir != "" {
v = v[len(r.codeDir)+1:]
}
+ // Note: ./codehost/codehost.go's isOriginTag knows about these conditions too.
+ // If these are relaxed, isOriginTag will need to be relaxed as well.
if v == "" || v != semver.Canonical(v) {
// Ignore non-canonical tags: Stat rewrites those to canonical
// pseudo-versions. Note that we compare against semver.Canonical here
@@ -186,7 +195,7 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) {
semver.Sort(list)
semver.Sort(incompatible)
- return r.appendIncompatibleVersions(list, incompatible)
+ return r.appendIncompatibleVersions(tags.Origin, list, incompatible)
}
// appendIncompatibleVersions appends "+incompatible" versions to list if
@@ -196,10 +205,14 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) {
// prefix.
//
// Both list and incompatible must be sorted in semantic order.
-func (r *codeRepo) appendIncompatibleVersions(list, incompatible []string) ([]string, error) {
+func (r *codeRepo) appendIncompatibleVersions(origin *codehost.Origin, list, incompatible []string) (*Versions, error) {
+ versions := &Versions{
+ Origin: origin,
+ List: list,
+ }
if len(incompatible) == 0 || r.pathMajor != "" {
// No +incompatible versions are possible, so no need to check them.
- return list, nil
+ return versions, nil
}
versionHasGoMod := func(v string) (bool, error) {
@@ -232,7 +245,7 @@ func (r *codeRepo) appendIncompatibleVersions(list, incompatible []string) ([]st
// (github.com/russross/blackfriday@v2.0.0 and
// github.com/libp2p/go-libp2p@v6.0.23), and (as of 2019-10-29) have no
// concrete examples for which it is undesired.
- return list, nil
+ return versions, nil
}
}
@@ -271,10 +284,10 @@ func (r *codeRepo) appendIncompatibleVersions(list, incompatible []string) ([]st
// bounds.
continue
}
- list = append(list, v+"+incompatible")
+ versions.List = append(versions.List, v+"+incompatible")
}
- return list, nil
+ return versions, nil
}
func (r *codeRepo) Stat(rev string) (*RevInfo, error) {
@@ -284,7 +297,15 @@ func (r *codeRepo) Stat(rev string) (*RevInfo, error) {
codeRev := r.revToRev(rev)
info, err := r.code.Stat(codeRev)
if err != nil {
- return nil, &module.ModuleError{
+ // Note: info may be non-nil to supply Origin for caching error.
+ var revInfo *RevInfo
+ if info != nil {
+ revInfo = &RevInfo{
+ Origin: info.Origin,
+ Version: rev,
+ }
+ }
+ return revInfo, &module.ModuleError{
Path: r.modPath,
Err: &module.InvalidVersionError{
Version: rev,
@@ -439,7 +460,31 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
return nil, errIncompatible
}
+ origin := info.Origin
+ if origin != nil {
+ o := *origin
+ origin = &o
+ origin.Subdir = r.codeDir
+ if module.IsPseudoVersion(v) && (v != statVers || !strings.HasPrefix(v, "v0.0.0-")) {
+ // Add tags that are relevant to pseudo-version calculation to origin.
+ prefix := r.codeDir
+ if prefix != "" {
+ prefix += "/"
+ }
+ if r.pathMajor != "" { // "/v2" or "/.v2"
+ prefix += r.pathMajor[1:] + "." // += "v2."
+ }
+ tags, err := r.code.Tags(prefix)
+ if err != nil {
+ return nil, err
+ }
+ origin.TagPrefix = tags.Origin.TagPrefix
+ origin.TagSum = tags.Origin.TagSum
+ }
+ }
+
return &RevInfo{
+ Origin: origin,
Name: info.Name,
Short: info.Short,
Time: info.Time,
@@ -674,11 +719,11 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string)
var lastTag string // Prefer to log some real tag rather than a canonically-equivalent base.
ancestorFound := false
- for _, tag := range tags {
- versionOnly := strings.TrimPrefix(tag, tagPrefix)
+ for _, tag := range tags.List {
+ versionOnly := strings.TrimPrefix(tag.Name, tagPrefix)
if semver.Compare(versionOnly, base) == 0 {
- lastTag = tag
- ancestorFound, err = r.code.DescendsFrom(info.Name, tag)
+ lastTag = tag.Name
+ ancestorFound, err = r.code.DescendsFrom(info.Name, tag.Name)
if ancestorFound {
break
}
@@ -747,7 +792,7 @@ func (r *codeRepo) findDir(version string) (rev, dir string, gomod []byte, err e
file1 := path.Join(r.codeDir, "go.mod")
gomod1, err1 := r.code.ReadFile(rev, file1, codehost.MaxGoMod)
if err1 != nil && !os.IsNotExist(err1) {
- return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.pathPrefix, file1, rev, err1)
+ return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.codeRoot, file1, rev, err1)
}
mpath1 := modfile.ModulePath(gomod1)
found1 := err1 == nil && (isMajor(mpath1, r.pathMajor) || r.canReplaceMismatchedVersionDueToBug(mpath1))
@@ -765,7 +810,7 @@ func (r *codeRepo) findDir(version string) (rev, dir string, gomod []byte, err e
file2 = path.Join(dir2, "go.mod")
gomod2, err2 := r.code.ReadFile(rev, file2, codehost.MaxGoMod)
if err2 != nil && !os.IsNotExist(err2) {
- return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.pathPrefix, file2, rev, err2)
+ return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.codeRoot, file2, rev, err2)
}
mpath2 := modfile.ModulePath(gomod2)
found2 := err2 == nil && isMajor(mpath2, r.pathMajor)
@@ -778,9 +823,9 @@ func (r *codeRepo) findDir(version string) (rev, dir string, gomod []byte, err e
}
if err2 == nil {
if mpath2 == "" {
- return "", "", nil, fmt.Errorf("%s/%s is missing module path at revision %s", r.pathPrefix, file2, rev)
+ return "", "", nil, fmt.Errorf("%s/%s is missing module path at revision %s", r.codeRoot, file2, rev)
}
- return "", "", nil, fmt.Errorf("%s/%s has non-...%s module path %q at revision %s", r.pathPrefix, file2, r.pathMajor, mpath2, rev)
+ return "", "", nil, fmt.Errorf("%s/%s has non-...%s module path %q at revision %s", r.codeRoot, file2, r.pathMajor, mpath2, rev)
}
}
@@ -922,10 +967,11 @@ func (r *codeRepo) modPrefix(rev string) string {
}
func (r *codeRepo) retractedVersions() (func(string) bool, error) {
- versions, err := r.Versions("")
+ vs, err := r.Versions("")
if err != nil {
return nil, err
}
+ versions := vs.List
for i, v := range versions {
if strings.HasSuffix(v, "+incompatible") {
diff --git a/src/cmd/go/internal/modfetch/coderepo_test.go b/src/cmd/go/internal/modfetch/coderepo_test.go
index 8d0eb00544..967978cd4d 100644
--- a/src/cmd/go/internal/modfetch/coderepo_test.go
+++ b/src/cmd/go/internal/modfetch/coderepo_test.go
@@ -823,7 +823,7 @@ func TestCodeRepoVersions(t *testing.T) {
if err != nil {
t.Fatalf("Versions(%q): %v", tt.prefix, err)
}
- if !reflect.DeepEqual(list, tt.versions) {
+ if !reflect.DeepEqual(list.List, tt.versions) {
t.Fatalf("Versions(%q):\nhave %v\nwant %v", tt.prefix, list, tt.versions)
}
})
@@ -921,7 +921,13 @@ type fixedTagsRepo struct {
codehost.Repo
}
-func (ch *fixedTagsRepo) Tags(string) ([]string, error) { return ch.tags, nil }
+func (ch *fixedTagsRepo) Tags(string) (*codehost.Tags, error) {
+ tags := &codehost.Tags{}
+ for _, t := range ch.tags {
+ tags.List = append(tags.List, codehost.Tag{Name: t})
+ }
+ return tags, nil
+}
func TestNonCanonicalSemver(t *testing.T) {
root := "golang.org/x/issue24476"
@@ -945,7 +951,7 @@ func TestNonCanonicalSemver(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if len(v) != 1 || v[0] != "v1.0.1" {
+ if len(v.List) != 1 || v.List[0] != "v1.0.1" {
t.Fatal("unexpected versions returned:", v)
}
}
diff --git a/src/cmd/go/internal/modfetch/proxy.go b/src/cmd/go/internal/modfetch/proxy.go
index 2491b7d185..d2374680d8 100644
--- a/src/cmd/go/internal/modfetch/proxy.go
+++ b/src/cmd/go/internal/modfetch/proxy.go
@@ -225,6 +225,12 @@ func (p *proxyRepo) ModulePath() string {
return p.path
}
+var errProxyReuse = fmt.Errorf("proxy does not support CheckReuse")
+
+func (p *proxyRepo) CheckReuse(old *codehost.Origin) error {
+ return errProxyReuse
+}
+
// versionError returns err wrapped in a ModuleError for p.path.
func (p *proxyRepo) versionError(version string, err error) error {
if version != "" && version != module.CanonicalVersion(version) {
@@ -279,7 +285,7 @@ func (p *proxyRepo) getBody(path string) (r io.ReadCloser, err error) {
return resp.Body, nil
}
-func (p *proxyRepo) Versions(prefix string) ([]string, error) {
+func (p *proxyRepo) Versions(prefix string) (*Versions, error) {
data, err := p.getBytes("@v/list")
if err != nil {
p.listLatestOnce.Do(func() {
@@ -299,7 +305,7 @@ func (p *proxyRepo) Versions(prefix string) ([]string, error) {
p.listLatest, p.listLatestErr = p.latestFromList(allLine)
})
semver.Sort(list)
- return list, nil
+ return &Versions{List: list}, nil
}
func (p *proxyRepo) latest() (*RevInfo, error) {
@@ -317,9 +323,8 @@ func (p *proxyRepo) latest() (*RevInfo, error) {
func (p *proxyRepo) latestFromList(allLine []string) (*RevInfo, error) {
var (
- bestTime time.Time
- bestTimeIsFromPseudo bool
- bestVersion string
+ bestTime time.Time
+ bestVersion string
)
for _, line := range allLine {
f := strings.Fields(line)
@@ -327,14 +332,12 @@ func (p *proxyRepo) latestFromList(allLine []string) (*RevInfo, error) {
// If the proxy includes timestamps, prefer the timestamp it reports.
// Otherwise, derive the timestamp from the pseudo-version.
var (
- ft time.Time
- ftIsFromPseudo = false
+ ft time.Time
)
if len(f) >= 2 {
ft, _ = time.Parse(time.RFC3339, f[1])
} else if module.IsPseudoVersion(f[0]) {
ft, _ = module.PseudoVersionTime(f[0])
- ftIsFromPseudo = true
} else {
// Repo.Latest promises that this method is only called where there are
// no tagged versions. Ignore any tagged versions that were added in the
@@ -343,7 +346,6 @@ func (p *proxyRepo) latestFromList(allLine []string) (*RevInfo, error) {
}
if bestTime.Before(ft) {
bestTime = ft
- bestTimeIsFromPseudo = ftIsFromPseudo
bestVersion = f[0]
}
}
@@ -352,22 +354,8 @@ func (p *proxyRepo) latestFromList(allLine []string) (*RevInfo, error) {
return nil, p.versionError("", codehost.ErrNoCommits)
}
- if bestTimeIsFromPseudo {
- // We parsed bestTime from the pseudo-version, but that's in UTC and we're
- // supposed to report the timestamp as reported by the VCS.
- // Stat the selected version to canonicalize the timestamp.
- //
- // TODO(bcmills): Should we also stat other versions to ensure that we
- // report the correct Name and Short for the revision?
- return p.Stat(bestVersion)
- }
-
- return &RevInfo{
- Version: bestVersion,
- Name: bestVersion,
- Short: bestVersion,
- Time: bestTime,
- }, nil
+ // Call Stat to get all the other fields, including Origin information.
+ return p.Stat(bestVersion)
}
func (p *proxyRepo) Stat(rev string) (*RevInfo, error) {
diff --git a/src/cmd/go/internal/modfetch/repo.go b/src/cmd/go/internal/modfetch/repo.go
index 1b42ecb6ed..d4c57bb300 100644
--- a/src/cmd/go/internal/modfetch/repo.go
+++ b/src/cmd/go/internal/modfetch/repo.go
@@ -29,6 +29,12 @@ type Repo interface {
// ModulePath returns the module path.
ModulePath() string
+ // CheckReuse checks whether the validation criteria in the origin
+ // are still satisfied on the server corresponding to this module.
+ // If so, the caller can reuse any cached Versions or RevInfo containing
+ // this origin rather than redownloading those from the server.
+ CheckReuse(old *codehost.Origin) error
+
// Versions lists all known versions with the given prefix.
// Pseudo-versions are not included.
//
@@ -42,7 +48,7 @@ type Repo interface {
//
// If the underlying repository does not exist,
// Versions returns an error matching errors.Is(_, os.NotExist).
- Versions(prefix string) ([]string, error)
+ Versions(prefix string) (*Versions, error)
// Stat returns information about the revision rev.
// A revision can be any identifier known to the underlying service:
@@ -61,7 +67,14 @@ type Repo interface {
Zip(dst io.Writer, version string) error
}
-// A Rev describes a single revision in a module repository.
+// A Versions describes the available versions in a module repository.
+type Versions struct {
+ Origin *codehost.Origin `json:",omitempty"` // origin information for reuse
+
+ List []string // semver versions
+}
+
+// A RevInfo describes a single revision in a module repository.
type RevInfo struct {
Version string // suggested version string for this revision
Time time.Time // commit time
@@ -70,6 +83,8 @@ type RevInfo struct {
// but they are not recorded when talking about module versions.
Name string `json:"-"` // complete ID in underlying repository
Short string `json:"-"` // shortened ID, for use in pseudo-version
+
+ Origin *codehost.Origin `json:",omitempty"` // provenance for reuse
}
// Re: module paths, import paths, repository roots, and lookups
@@ -320,7 +335,14 @@ func (l *loggingRepo) ModulePath() string {
return l.r.ModulePath()
}
-func (l *loggingRepo) Versions(prefix string) (tags []string, err error) {
+func (l *loggingRepo) CheckReuse(old *codehost.Origin) (err error) {
+ defer func() {
+ logCall("CheckReuse[%s]: %v", l.r.ModulePath(), err)
+ }()
+ return l.r.CheckReuse(old)
+}
+
+func (l *loggingRepo) Versions(prefix string) (*Versions, error) {
defer logCall("Repo[%s]: Versions(%q)", l.r.ModulePath(), prefix)()
return l.r.Versions(prefix)
}
@@ -360,11 +382,12 @@ type errRepo struct {
func (r errRepo) ModulePath() string { return r.modulePath }
-func (r errRepo) Versions(prefix string) (tags []string, err error) { return nil, r.err }
-func (r errRepo) Stat(rev string) (*RevInfo, error) { return nil, r.err }
-func (r errRepo) Latest() (*RevInfo, error) { return nil, r.err }
-func (r errRepo) GoMod(version string) ([]byte, error) { return nil, r.err }
-func (r errRepo) Zip(dst io.Writer, version string) error { return r.err }
+func (r errRepo) CheckReuse(old *codehost.Origin) error { return r.err }
+func (r errRepo) Versions(prefix string) (*Versions, error) { return nil, r.err }
+func (r errRepo) Stat(rev string) (*RevInfo, error) { return nil, r.err }
+func (r errRepo) Latest() (*RevInfo, error) { return nil, r.err }
+func (r errRepo) GoMod(version string) ([]byte, error) { return nil, r.err }
+func (r errRepo) Zip(dst io.Writer, version string) error { return r.err }
// A notExistError is like fs.ErrNotExist, but with a custom message
type notExistError struct {
diff --git a/src/cmd/go/internal/modindex/index_test.go b/src/cmd/go/internal/modindex/index_test.go
new file mode 100644
index 0000000000..2c072f909d
--- /dev/null
+++ b/src/cmd/go/internal/modindex/index_test.go
@@ -0,0 +1,87 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "go/build"
+ "internal/diff"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "testing"
+)
+
+func init() {
+ isTest = true
+ enabled = true // to allow GODEBUG=goindex=0 go test, when things are very broken
+}
+
+func TestIndex(t *testing.T) {
+ src := filepath.Join(runtime.GOROOT(), "src")
+ checkPkg := func(t *testing.T, m *Module, pkg string, data []byte) {
+ p := m.Package(pkg)
+ bp, err := p.Import(build.Default, build.ImportComment)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bp1, err := build.Default.Import(pkg, filepath.Join(src, pkg), build.ImportComment)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(bp, bp1) {
+ t.Errorf("mismatch")
+ t.Logf("index:\n%s", hex.Dump(data))
+
+ js, err := json.MarshalIndent(bp, "", "\t")
+ if err != nil {
+ t.Fatal(err)
+ }
+ js1, err := json.MarshalIndent(bp1, "", "\t")
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("diff:\n%s", diff.Diff("index", js, "correct", js1))
+ t.FailNow()
+ }
+ }
+
+ // Check packages in increasing complexity, one at a time.
+ pkgs := []string{
+ "crypto",
+ "encoding",
+ "unsafe",
+ "encoding/json",
+ "runtime",
+ "net",
+ }
+ var raws []*rawPackage
+ for _, pkg := range pkgs {
+ raw := importRaw(src, pkg)
+ raws = append(raws, raw)
+ t.Run(pkg, func(t *testing.T) {
+ data := encodeModuleBytes([]*rawPackage{raw})
+ m, err := fromBytes(src, data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkPkg(t, m, pkg, data)
+ })
+ }
+
+ // Check that a multi-package index works too.
+ t.Run("all", func(t *testing.T) {
+ data := encodeModuleBytes(raws)
+ m, err := fromBytes(src, data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, pkg := range pkgs {
+ checkPkg(t, m, pkg, data)
+ }
+ })
+}
diff --git a/src/cmd/go/internal/modindex/read.go b/src/cmd/go/internal/modindex/read.go
index 7ee4669e67..707f17e1ab 100644
--- a/src/cmd/go/internal/modindex/read.go
+++ b/src/cmd/go/internal/modindex/read.go
@@ -15,7 +15,6 @@ import (
"internal/godebug"
"internal/goroot"
"internal/unsafeheader"
- "math"
"path"
"path/filepath"
"runtime"
@@ -45,10 +44,9 @@ var enabled bool = godebug.Get("goindex") != "0"
// do the equivalent of build.Import of packages in the module and answer other
// questions based on the index file's data.
type Module struct {
- modroot string
- od offsetDecoder
- packages map[string]int // offsets of each package
- packagePaths []string // paths to package directories relative to modroot; these are the keys of packages
+ modroot string
+ d *decoder
+ n int // number of packages
}
// moduleHash returns an ActionID corresponding to the state of the module
@@ -141,6 +139,9 @@ func GetPackage(modroot, pkgdir string) (*IndexPackage, error) {
if !errors.Is(err, errNotFromModuleCache) {
return nil, err
}
+ if cfg.BuildContext.Compiler == "gccgo" && str.HasPathPrefix(modroot, cfg.GOROOTsrc) {
+ return nil, err // gccgo has no sources for GOROOT packages.
+ }
return openIndexPackage(modroot, pkgdir)
}
@@ -179,6 +180,7 @@ func openIndexModule(modroot string, ismodcache bool) (*Module, error) {
err error
}
r := mcache.Do(modroot, func() any {
+ fsys.Trace("openIndexModule", modroot)
id, err := moduleHash(modroot, ismodcache)
if err != nil {
return result{nil, err}
@@ -212,6 +214,7 @@ func openIndexPackage(modroot, pkgdir string) (*IndexPackage, error) {
err error
}
r := pcache.Do([2]string{modroot, pkgdir}, func() any {
+ fsys.Trace("openIndexPackage", pkgdir)
id, err := dirHash(modroot, pkgdir)
if err != nil {
return result{nil, err}
@@ -234,12 +237,30 @@ func openIndexPackage(modroot, pkgdir string) (*IndexPackage, error) {
return r.pkg, r.err
}
-// fromBytes returns a *Module given the encoded representation.
-func fromBytes(moddir string, data []byte) (mi *Module, err error) {
- if !enabled {
- panic("use of index")
- }
+var errCorrupt = errors.New("corrupt index")
+
+// protect marks the start of a large section of code that accesses the index.
+// It should be used as:
+//
+// defer unprotect(protect(), &err)
+//
+// It should not be used for trivial accesses which would be
+// dwarfed by the overhead of the defer.
+func protect() bool {
+ return debug.SetPanicOnFault(true)
+}
+var isTest = false
+
+// unprotect marks the end of a large section of code that accesses the index.
+// It should be used as:
+//
+// defer unprotect(protect(), &err)
+//
+// unprotect looks for panics due to errCorrupt or bad mmap accesses.
+// When it finds them, it adds explanatory text, consumes the panic, and sets *errp instead.
+// If errp is nil, unprotect adds the explanatory text but then calls base.Fatalf.
+func unprotect(old bool, errp *error) {
// SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
// that all its errors satisfy this interface, we'll only check for these errors so that
// we don't suppress panics that could have been produced from other sources.
@@ -247,97 +268,100 @@ func fromBytes(moddir string, data []byte) (mi *Module, err error) {
Addr() uintptr
}
- // set PanicOnFault to true so that we can catch errors on the initial reads of the slice,
- // in case it's mmapped (the common case).
- old := debug.SetPanicOnFault(true)
- defer func() {
- debug.SetPanicOnFault(old)
- if e := recover(); e != nil {
- if _, ok := e.(addrer); ok {
- // This panic was almost certainly caused by SetPanicOnFault.
- err = fmt.Errorf("error reading module index: %v", e)
+ debug.SetPanicOnFault(old)
+
+ if e := recover(); e != nil {
+ if _, ok := e.(addrer); ok || e == errCorrupt {
+ // This panic was almost certainly caused by SetPanicOnFault or our panic(errCorrupt).
+ err := fmt.Errorf("error reading module index: %v", e)
+ if errp != nil {
+ *errp = err
return
}
- // The panic was likely not caused by SetPanicOnFault.
- panic(e)
+ if isTest {
+ panic(err)
+ }
+ base.Fatalf("%v", err)
}
- }()
+ // The panic was likely not caused by SetPanicOnFault.
+ panic(e)
+ }
+}
+
+// fromBytes returns a *Module given the encoded representation.
+func fromBytes(moddir string, data []byte) (m *Module, err error) {
+ if !enabled {
+ panic("use of index")
+ }
- gotVersion, unread, _ := bytes.Cut(data, []byte{'\n'})
- if string(gotVersion) != indexVersion {
- return nil, fmt.Errorf("bad index version string: %q", gotVersion)
+ defer unprotect(protect(), &err)
+
+ if !bytes.HasPrefix(data, []byte(indexVersion+"\n")) {
+ return nil, errCorrupt
}
- stringTableOffset, unread := binary.LittleEndian.Uint32(unread[:4]), unread[4:]
- st := newStringTable(data[stringTableOffset:])
- d := decoder{unread, st}
- numPackages := d.int()
-
- packagePaths := make([]string, numPackages)
- for i := range packagePaths {
- packagePaths[i] = d.string()
+
+ const hdr = len(indexVersion + "\n")
+ d := &decoder{data: data}
+ str := d.intAt(hdr)
+ if str < hdr+8 || len(d.data) < str {
+ return nil, errCorrupt
}
- packageOffsets := make([]int, numPackages)
- for i := range packageOffsets {
- packageOffsets[i] = d.int()
+ d.data, d.str = data[:str], d.data[str:]
+ // Check that string table looks valid.
+ // First string is empty string (length 0),
+ // and we leave a marker byte 0xFF at the end
+ // just to make sure that the file is not truncated.
+ if len(d.str) == 0 || d.str[0] != 0 || d.str[len(d.str)-1] != 0xFF {
+ return nil, errCorrupt
}
- packages := make(map[string]int, numPackages)
- for i := range packagePaths {
- packages[packagePaths[i]] = packageOffsets[i]
+
+ n := d.intAt(hdr + 4)
+ if n < 0 || n > (len(d.data)-8)/8 {
+ return nil, errCorrupt
}
- return &Module{
+ m = &Module{
moddir,
- offsetDecoder{data, st},
- packages,
- packagePaths,
- }, nil
+ d,
+ n,
+ }
+ return m, nil
}
// packageFromBytes returns a *IndexPackage given the encoded representation.
func packageFromBytes(modroot string, data []byte) (p *IndexPackage, err error) {
- if !enabled {
- panic("use of package index when not enabled")
+ m, err := fromBytes(modroot, data)
+ if err != nil {
+ return nil, err
}
-
- // SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
- // that all its errors satisfy this interface, we'll only check for these errors so that
- // we don't suppress panics that could have been produced from other sources.
- type addrer interface {
- Addr() uintptr
+ if m.n != 1 {
+ return nil, fmt.Errorf("corrupt single-package index")
}
+ return m.pkg(0), nil
+}
- // set PanicOnFault to true so that we can catch errors on the initial reads of the slice,
- // in case it's mmapped (the common case).
- old := debug.SetPanicOnFault(true)
- defer func() {
- debug.SetPanicOnFault(old)
- if e := recover(); e != nil {
- if _, ok := e.(addrer); ok {
- // This panic was almost certainly caused by SetPanicOnFault.
- err = fmt.Errorf("error reading module index: %v", e)
- return
- }
- // The panic was likely not caused by SetPanicOnFault.
- panic(e)
- }
- }()
+// pkgDir returns the dir string of the i'th package in the index.
+func (m *Module) pkgDir(i int) string {
+ if i < 0 || i >= m.n {
+ panic(errCorrupt)
+ }
+ return m.d.stringAt(12 + 8 + 8*i)
+}
- gotVersion, unread, _ := bytes.Cut(data, []byte{'\n'})
- if string(gotVersion) != indexVersion {
- return nil, fmt.Errorf("bad index version string: %q", gotVersion)
+// pkgOff returns the offset of the data for the i'th package in the index.
+func (m *Module) pkgOff(i int) int {
+ if i < 0 || i >= m.n {
+ panic(errCorrupt)
}
- stringTableOffset, unread := binary.LittleEndian.Uint32(unread[:4]), unread[4:]
- st := newStringTable(data[stringTableOffset:])
- d := &decoder{unread, st}
- p = decodePackage(d, offsetDecoder{data, st})
- p.modroot = modroot
- return p, nil
+ return m.d.intAt(12 + 8 + 8*i + 4)
}
-// Returns a list of directory paths, relative to the modroot, for
-// packages contained in the module index.
-func (mi *Module) Packages() []string {
- return mi.packagePaths
+// Walk calls f for each package in the index, passing the path to that package relative to the module root.
+func (m *Module) Walk(f func(path string)) {
+ defer unprotect(protect(), nil)
+ for i := 0; i < m.n; i++ {
+ f(m.pkgDir(i))
+ }
}
// relPath returns the path relative to the module's root.
@@ -347,11 +371,7 @@ func relPath(path, modroot string) string {
// Import is the equivalent of build.Import given the information in Module.
func (rp *IndexPackage) Import(bctxt build.Context, mode build.ImportMode) (p *build.Package, err error) {
- defer func() {
- if e := recover(); e != nil {
- err = fmt.Errorf("error reading module index: %v", e)
- }
- }()
+ defer unprotect(protect(), &err)
ctxt := (*Context)(&bctxt)
@@ -792,46 +812,44 @@ type IndexPackage struct {
var errCannotFindPackage = errors.New("cannot find package")
-// Package returns an IndexPackage constructed using the information in the Module.
-func (mi *Module) Package(path string) *IndexPackage {
- defer func() {
- if e := recover(); e != nil {
- base.Fatalf("error reading module index: %v", e)
- }
- }()
- offset, ok := mi.packages[path]
+// Package finds the package with the given path (relative to the module root).
+// If the package does not exist, Package returns an IndexPackage that will return an
+// appropriate error from its methods.
+func (m *Module) Package(path string) *IndexPackage {
+ defer unprotect(protect(), nil)
+
+ i, ok := sort.Find(m.n, func(i int) int {
+ return strings.Compare(path, m.pkgDir(i))
+ })
if !ok {
- return &IndexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(mi.modroot, path))}
+ return &IndexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(m.modroot, path))}
}
-
- // TODO(matloob): do we want to lock on the module index?
- d := mi.od.decoderAt(offset)
- p := decodePackage(d, mi.od)
- p.modroot = mi.modroot
- return p
+ return m.pkg(i)
}
-func decodePackage(d *decoder, od offsetDecoder) *IndexPackage {
- rp := new(IndexPackage)
- if errstr := d.string(); errstr != "" {
- rp.error = errors.New(errstr)
+// pkg returns the i'th IndexPackage in m.
+func (m *Module) pkg(i int) *IndexPackage {
+ r := m.d.readAt(m.pkgOff(i))
+ p := new(IndexPackage)
+ if errstr := r.string(); errstr != "" {
+ p.error = errors.New(errstr)
}
- rp.dir = d.string()
- numSourceFiles := d.uint32()
- rp.sourceFiles = make([]*sourceFile, numSourceFiles)
- for i := uint32(0); i < numSourceFiles; i++ {
- offset := d.uint32()
- rp.sourceFiles[i] = &sourceFile{
- od: od.offsetDecoderAt(offset),
+ p.dir = r.string()
+ p.sourceFiles = make([]*sourceFile, r.int())
+ for i := range p.sourceFiles {
+ p.sourceFiles[i] = &sourceFile{
+ d: m.d,
+ pos: r.int(),
}
}
- return rp
+ p.modroot = m.modroot
+ return p
}
// sourceFile represents the information of a given source file in the module index.
type sourceFile struct {
- od offsetDecoder // od interprets all offsets relative to the start of the source file's data
-
+ d *decoder // encoding of this source file
+ pos int // start of sourceFile encoding in d
onceReadImports sync.Once
savedImports []rawImport // saved imports so that they're only read once
}
@@ -851,73 +869,67 @@ const (
)
func (sf *sourceFile) error() string {
- return sf.od.stringAt(sourceFileError)
+ return sf.d.stringAt(sf.pos + sourceFileError)
}
func (sf *sourceFile) parseError() string {
- return sf.od.stringAt(sourceFileParseError)
+ return sf.d.stringAt(sf.pos + sourceFileParseError)
}
func (sf *sourceFile) synopsis() string {
- return sf.od.stringAt(sourceFileSynopsis)
+ return sf.d.stringAt(sf.pos + sourceFileSynopsis)
}
func (sf *sourceFile) name() string {
- return sf.od.stringAt(sourceFileName)
+ return sf.d.stringAt(sf.pos + sourceFileName)
}
func (sf *sourceFile) pkgName() string {
- return sf.od.stringAt(sourceFilePkgName)
+ return sf.d.stringAt(sf.pos + sourceFilePkgName)
}
func (sf *sourceFile) ignoreFile() bool {
- return sf.od.boolAt(sourceFileIgnoreFile)
+ return sf.d.boolAt(sf.pos + sourceFileIgnoreFile)
}
func (sf *sourceFile) binaryOnly() bool {
- return sf.od.boolAt(sourceFileBinaryOnly)
+ return sf.d.boolAt(sf.pos + sourceFileBinaryOnly)
}
func (sf *sourceFile) cgoDirectives() string {
- return sf.od.stringAt(sourceFileCgoDirectives)
+ return sf.d.stringAt(sf.pos + sourceFileCgoDirectives)
}
func (sf *sourceFile) goBuildConstraint() string {
- return sf.od.stringAt(sourceFileGoBuildConstraint)
+ return sf.d.stringAt(sf.pos + sourceFileGoBuildConstraint)
}
func (sf *sourceFile) plusBuildConstraints() []string {
- d := sf.od.decoderAt(sourceFileNumPlusBuildConstraints)
- n := d.int()
+ pos := sf.pos + sourceFileNumPlusBuildConstraints
+ n := sf.d.intAt(pos)
+ pos += 4
ret := make([]string, n)
for i := 0; i < n; i++ {
- ret[i] = d.string()
+ ret[i] = sf.d.stringAt(pos)
+ pos += 4
}
return ret
}
-func importsOffset(numPlusBuildConstraints int) int {
- // 4 bytes per uin32, add one to advance past numPlusBuildConstraints itself
- return sourceFileNumPlusBuildConstraints + 4*(numPlusBuildConstraints+1)
-}
-
func (sf *sourceFile) importsOffset() int {
- numPlusBuildConstraints := sf.od.intAt(sourceFileNumPlusBuildConstraints)
- return importsOffset(numPlusBuildConstraints)
-}
-
-func embedsOffset(importsOffset, numImports int) int {
- // 4 bytes per uint32; 1 to advance past numImports itself, and 5 uint32s per import
- return importsOffset + 4*(1+(5*numImports))
+ pos := sf.pos + sourceFileNumPlusBuildConstraints
+ n := sf.d.intAt(pos)
+ // each build constraint is 1 uint32
+ return pos + 4 + n*4
}
func (sf *sourceFile) embedsOffset() int {
- importsOffset := sf.importsOffset()
- numImports := sf.od.intAt(importsOffset)
- return embedsOffset(importsOffset, numImports)
+ pos := sf.importsOffset()
+ n := sf.d.intAt(pos)
+ // each import is 5 uint32s (string + tokpos)
+ return pos + 4 + n*(4*5)
}
func (sf *sourceFile) imports() []rawImport {
sf.onceReadImports.Do(func() {
importsOffset := sf.importsOffset()
- d := sf.od.decoderAt(importsOffset)
- numImports := d.int()
+ r := sf.d.readAt(importsOffset)
+ numImports := r.int()
ret := make([]rawImport, numImports)
for i := 0; i < numImports; i++ {
- ret[i].path = d.string()
- ret[i].position = d.tokpos()
+ ret[i] = rawImport{r.string(), r.tokpos()}
}
sf.savedImports = ret
})
@@ -926,132 +938,101 @@ func (sf *sourceFile) imports() []rawImport {
func (sf *sourceFile) embeds() []embed {
embedsOffset := sf.embedsOffset()
- d := sf.od.decoderAt(embedsOffset)
- numEmbeds := d.int()
+ r := sf.d.readAt(embedsOffset)
+ numEmbeds := r.int()
ret := make([]embed, numEmbeds)
for i := range ret {
- pattern := d.string()
- pos := d.tokpos()
- ret[i] = embed{pattern, pos}
+ ret[i] = embed{r.string(), r.tokpos()}
}
return ret
}
-// A decoder reads from the current position of the file and advances its position as it
-// reads.
-type decoder struct {
- b []byte
- st *stringTable
-}
+func asString(b []byte) string {
+ p := (*unsafeheader.Slice)(unsafe.Pointer(&b)).Data
-func (d *decoder) uint32() uint32 {
- n := binary.LittleEndian.Uint32(d.b[:4])
- d.b = d.b[4:]
- return n
-}
+ var s string
+ hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
+ hdr.Data = p
+ hdr.Len = len(b)
-func (d *decoder) int() int {
- n := d.uint32()
- if int64(n) > math.MaxInt {
- base.Fatalf("go: attempting to read a uint32 from the index that overflows int")
- }
- return int(n)
+ return s
}
-func (d *decoder) tokpos() token.Position {
- file := d.string()
- offset := d.int()
- line := d.int()
- column := d.int()
- return token.Position{
- Filename: file,
- Offset: offset,
- Line: line,
- Column: column,
- }
+// A decoder helps decode the index format.
+type decoder struct {
+ data []byte // data after header
+ str []byte // string table
}
-func (d *decoder) string() string {
- return d.st.string(d.int())
+// intAt returns the int at the given offset in d.data.
+func (d *decoder) intAt(off int) int {
+ if off < 0 || len(d.data)-off < 4 {
+ panic(errCorrupt)
+ }
+ i := binary.LittleEndian.Uint32(d.data[off : off+4])
+ if int32(i)>>31 != 0 {
+ panic(errCorrupt)
+ }
+ return int(i)
}
-// And offset decoder reads information offset from its position in the file.
-// It's either offset from the beginning of the index, or the beginning of a sourceFile's data.
-type offsetDecoder struct {
- b []byte
- st *stringTable
+// boolAt returns the bool at the given offset in d.data.
+func (d *decoder) boolAt(off int) bool {
+ return d.intAt(off) != 0
}
-func (od *offsetDecoder) uint32At(offset int) uint32 {
- if offset > len(od.b) {
- base.Fatalf("go: trying to read from index file at offset higher than file length. This indicates a corrupt offset file in the cache.")
- }
- return binary.LittleEndian.Uint32(od.b[offset:])
+// stringAt returns the string pointed at by the int at the given offset in d.data.
+func (d *decoder) stringAt(off int) string {
+ return d.stringTableAt(d.intAt(off))
}
-func (od *offsetDecoder) intAt(offset int) int {
- n := od.uint32At(offset)
- if int64(n) > math.MaxInt {
- base.Fatalf("go: attempting to read a uint32 from the index that overflows int")
+// stringTableAt returns the string at the given offset in the string table d.str.
+func (d *decoder) stringTableAt(off int) string {
+ if off < 0 || off >= len(d.str) {
+ panic(errCorrupt)
}
- return int(n)
-}
-
-func (od *offsetDecoder) boolAt(offset int) bool {
- switch v := od.uint32At(offset); v {
- case 0:
- return false
- case 1:
- return true
- default:
- base.Fatalf("go: invalid bool value in index file encoding: %v", v)
+ s := d.str[off:]
+ v, n := binary.Uvarint(s)
+ if n <= 0 || v > uint64(len(s[n:])) {
+ panic(errCorrupt)
}
- panic("unreachable")
+ return asString(s[n : n+int(v)])
}
-func (od *offsetDecoder) stringAt(offset int) string {
- return od.st.string(od.intAt(offset))
+// A reader reads sequential fields from a section of the index format.
+type reader struct {
+ d *decoder
+ pos int
}
-func (od *offsetDecoder) decoderAt(offset int) *decoder {
- return &decoder{od.b[offset:], od.st}
+// readAt returns a reader starting at the given position in d.
+func (d *decoder) readAt(pos int) *reader {
+ return &reader{d, pos}
}
-func (od *offsetDecoder) offsetDecoderAt(offset uint32) offsetDecoder {
- return offsetDecoder{od.b[offset:], od.st}
+// int reads the next int.
+func (r *reader) int() int {
+ i := r.d.intAt(r.pos)
+ r.pos += 4
+ return i
}
-type stringTable struct {
- b []byte
+// string reads the next string.
+func (r *reader) string() string {
+ return r.d.stringTableAt(r.int())
}
-func newStringTable(b []byte) *stringTable {
- return &stringTable{b: b}
+// bool reads the next bool.
+func (r *reader) bool() bool {
+ return r.int() != 0
}
-func (st *stringTable) string(pos int) string {
- if pos == 0 {
- return ""
- }
-
- bb := st.b[pos:]
- i := bytes.IndexByte(bb, 0)
-
- if i == -1 {
- panic("reached end of string table trying to read string")
+// tokpos reads the next token.Position.
+func (r *reader) tokpos() token.Position {
+ return token.Position{
+ Filename: r.string(),
+ Offset: r.int(),
+ Line: r.int(),
+ Column: r.int(),
}
- s := asString(bb[:i])
-
- return s
-}
-
-func asString(b []byte) string {
- p := (*unsafeheader.Slice)(unsafe.Pointer(&b)).Data
-
- var s string
- hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
- hdr.Data = p
- hdr.Len = len(b)
-
- return s
}
diff --git a/src/cmd/go/internal/modindex/scan.go b/src/cmd/go/internal/modindex/scan.go
index 1ba7c0cad1..d3f059bcfc 100644
--- a/src/cmd/go/internal/modindex/scan.go
+++ b/src/cmd/go/internal/modindex/scan.go
@@ -46,6 +46,7 @@ func moduleWalkErr(modroot string, path string, info fs.FileInfo, err error) err
// encoded representation. It returns ErrNotIndexed if the module can't
// be indexed because it contains symlinks.
func indexModule(modroot string) ([]byte, error) {
+ fsys.Trace("indexModule", modroot)
var packages []*rawPackage
err := fsys.Walk(modroot, func(path string, info fs.FileInfo, err error) error {
if err := moduleWalkErr(modroot, path, info, err); err != nil {
@@ -72,6 +73,7 @@ func indexModule(modroot string) ([]byte, error) {
// encoded representation. It returns ErrNotIndexed if the package can't
// be indexed.
func indexPackage(modroot, pkgdir string) []byte {
+ fsys.Trace("indexPackage", pkgdir)
p := importRaw(modroot, relPath(pkgdir, modroot))
return encodePackageBytes(p)
}
diff --git a/src/cmd/go/internal/modindex/write.go b/src/cmd/go/internal/modindex/write.go
index 3408248bd9..7db1fb0870 100644
--- a/src/cmd/go/internal/modindex/write.go
+++ b/src/cmd/go/internal/modindex/write.go
@@ -1,54 +1,46 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package modindex
import (
"cmd/go/internal/base"
"encoding/binary"
"go/token"
- "math"
"sort"
- "strings"
)
-const indexVersion = "go index v0"
+const indexVersion = "go index v1" // 11 bytes (plus \n), to align uint32s in index
// encodeModuleBytes produces the encoded representation of the module index.
// encodeModuleBytes may modify the packages slice.
func encodeModuleBytes(packages []*rawPackage) []byte {
e := newEncoder()
- e.Bytes([]byte(indexVersion))
- e.Bytes([]byte{'\n'})
+ e.Bytes([]byte(indexVersion + "\n"))
stringTableOffsetPos := e.Pos() // fill this at the end
e.Uint32(0) // string table offset
- e.Int(len(packages))
sort.Slice(packages, func(i, j int) bool {
return packages[i].dir < packages[j].dir
})
+ e.Int(len(packages))
+ packagesPos := e.Pos()
for _, p := range packages {
e.String(p.dir)
- }
- packagesOffsetPos := e.Pos()
- for range packages {
e.Int(0)
}
for i, p := range packages {
- e.IntAt(e.Pos(), packagesOffsetPos+4*i)
+ e.IntAt(e.Pos(), packagesPos+8*i+4)
encodePackage(e, p)
}
e.IntAt(e.Pos(), stringTableOffsetPos)
e.Bytes(e.stringTable)
+ e.Bytes([]byte{0xFF}) // end of string table marker
return e.b
}
func encodePackageBytes(p *rawPackage) []byte {
- e := newEncoder()
- e.Bytes([]byte(indexVersion))
- e.Bytes([]byte{'\n'})
- stringTableOffsetPos := e.Pos() // fill this at the end
- e.Uint32(0) // string table offset
- encodePackage(e, p)
- e.IntAt(e.Pos(), stringTableOffsetPos)
- e.Bytes(e.stringTable)
- return e.b
+ return encodeModuleBytes([]*rawPackage{p})
}
func encodePackage(e *encoder, p *rawPackage) {
@@ -126,9 +118,6 @@ func (e *encoder) Bytes(b []byte) {
}
func (e *encoder) String(s string) {
- if strings.IndexByte(s, 0) >= 0 {
- base.Fatalf("go: attempting to encode a string containing a null byte")
- }
if n, ok := e.strings[s]; ok {
e.Int(n)
return
@@ -136,8 +125,8 @@ func (e *encoder) String(s string) {
pos := len(e.stringTable)
e.strings[s] = pos
e.Int(pos)
+ e.stringTable = binary.AppendUvarint(e.stringTable, uint64(len(s)))
e.stringTable = append(e.stringTable, []byte(s)...)
- e.stringTable = append(e.stringTable, 0)
}
func (e *encoder) Bool(b bool) {
@@ -152,17 +141,18 @@ func (e *encoder) Uint32(n uint32) {
e.b = binary.LittleEndian.AppendUint32(e.b, n)
}
-// Int encodes n. Note that all ints are written to the index as uint32s.
+// Int encodes n. Note that all ints are written to the index as uint32s,
+// and to avoid problems on 32-bit systems we require fitting into a 32-bit int.
func (e *encoder) Int(n int) {
- if n < 0 || int64(n) > math.MaxUint32 {
- base.Fatalf("go: attempting to write an int to the index that overflows uint32")
+ if n < 0 || int(int32(n)) != n {
+ base.Fatalf("go: attempting to write an int to the index that overflows int32")
}
e.Uint32(uint32(n))
}
func (e *encoder) IntAt(n int, at int) {
- if n < 0 || int64(n) > math.MaxUint32 {
- base.Fatalf("go: attempting to write an int to the index that overflows uint32")
+ if n < 0 || int(int32(n)) != n {
+ base.Fatalf("go: attempting to write an int to the index that overflows int32")
}
binary.LittleEndian.PutUint32(e.b[at:], uint32(n))
}
diff --git a/src/cmd/go/internal/modinfo/info.go b/src/cmd/go/internal/modinfo/info.go
index 19088352f0..b0adcbcfb3 100644
--- a/src/cmd/go/internal/modinfo/info.go
+++ b/src/cmd/go/internal/modinfo/info.go
@@ -4,7 +4,11 @@
package modinfo
-import "time"
+import (
+ "cmd/go/internal/modfetch/codehost"
+ "encoding/json"
+ "time"
+)
// Note that these structs are publicly visible (part of go list's API)
// and the fields are documented in the help text in ../list/list.go
@@ -12,6 +16,7 @@ import "time"
type ModulePublic struct {
Path string `json:",omitempty"` // module path
Version string `json:",omitempty"` // module version
+ Query string `json:",omitempty"` // version query corresponding to this version
Versions []string `json:",omitempty"` // available module versions
Replace *ModulePublic `json:",omitempty"` // replaced by this module
Time *time.Time `json:",omitempty"` // time version was created
@@ -24,12 +29,27 @@ type ModulePublic struct {
Retracted []string `json:",omitempty"` // retraction information, if any (with -retracted or -u)
Deprecated string `json:",omitempty"` // deprecation message, if any (with -u)
Error *ModuleError `json:",omitempty"` // error loading module
+
+ Origin *codehost.Origin `json:",omitempty"` // provenance of module
+ Reuse bool `json:",omitempty"` // reuse of old module info is safe
}
type ModuleError struct {
Err string // error text
}
+type moduleErrorNoMethods ModuleError
+
+// UnmarshalJSON accepts both {"Err":"text"} and "text",
+// so that the output of go mod download -json can still
+// be unmarshalled into a ModulePublic during -reuse processing.
+func (e *ModuleError) UnmarshalJSON(data []byte) error {
+ if len(data) > 0 && data[0] == '"' {
+ return json.Unmarshal(data, &e.Err)
+ }
+ return json.Unmarshal(data, (*moduleErrorNoMethods)(e))
+}
+
func (m *ModulePublic) String() string {
s := m.Path
versionString := func(mm *ModulePublic) string {
diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go
index 0799fec35c..555d4b3c63 100644
--- a/src/cmd/go/internal/modload/build.go
+++ b/src/cmd/go/internal/modload/build.go
@@ -17,6 +17,7 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/modfetch"
+ "cmd/go/internal/modfetch/codehost"
"cmd/go/internal/modindex"
"cmd/go/internal/modinfo"
"cmd/go/internal/search"
@@ -60,7 +61,7 @@ func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePubli
}
rs := LoadModFile(ctx)
- return moduleInfo(ctx, rs, m, 0)
+ return moduleInfo(ctx, rs, m, 0, nil)
}
// PackageModRoot returns the module root directory for the module that provides
@@ -90,7 +91,7 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic {
if i := strings.Index(path, "@"); i >= 0 {
m := module.Version{Path: path[:i], Version: path[i+1:]}
- return moduleInfo(ctx, nil, m, 0)
+ return moduleInfo(ctx, nil, m, 0, nil)
}
rs := LoadModFile(ctx)
@@ -119,7 +120,7 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic {
}
}
- return moduleInfo(ctx, rs, module.Version{Path: path, Version: v}, 0)
+ return moduleInfo(ctx, rs, module.Version{Path: path, Version: v}, 0, nil)
}
// addUpdate fills in m.Update if an updated version is available.
@@ -130,10 +131,15 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) {
info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed)
var noVersionErr *NoMatchingVersionError
- if errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) {
+ if errors.Is(err, ErrDisallowed) ||
+ errors.Is(err, fs.ErrNotExist) ||
+ errors.As(err, &noVersionErr) {
// Ignore "not found" and "no matching version" errors.
// This means the proxy has no matching version or no versions at all.
//
+ // Ignore "disallowed" errors. This means the current version is
+ // excluded or retracted and there are no higher allowed versions.
+ //
// We should report other errors though. An attacker that controls the
// network shouldn't be able to hide versions by interfering with
// the HTTPS connection. An attacker that controls the proxy may still
@@ -156,6 +162,45 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) {
}
}
+// mergeOrigin merges two origins,
+// returning and possibly modifying one of its arguments.
+// If the two origins conflict, mergeOrigin returns a non-specific one
+// that will not pass CheckReuse.
+// If m1 or m2 is nil, the other is returned unmodified.
+// But if m1 or m2 is non-nil and uncheckable, the result is also uncheckable,
+// to preserve uncheckability.
+func mergeOrigin(m1, m2 *codehost.Origin) *codehost.Origin {
+ if m1 == nil {
+ return m2
+ }
+ if m2 == nil {
+ return m1
+ }
+ if !m1.Checkable() {
+ return m1
+ }
+ if !m2.Checkable() {
+ return m2
+ }
+ if m2.TagSum != "" {
+ if m1.TagSum != "" && (m1.TagSum != m2.TagSum || m1.TagPrefix != m2.TagPrefix) {
+ m1.ClearCheckable()
+ return m1
+ }
+ m1.TagSum = m2.TagSum
+ m1.TagPrefix = m2.TagPrefix
+ }
+ if m2.Hash != "" {
+ if m1.Hash != "" && (m1.Hash != m2.Hash || m1.Ref != m2.Ref) {
+ m1.ClearCheckable()
+ return m1
+ }
+ m1.Hash = m2.Hash
+ m1.Ref = m2.Ref
+ }
+ return m1
+}
+
// addVersions fills in m.Versions with the list of known versions.
// Excluded versions will be omitted. If listRetracted is false, retracted
// versions will also be omitted.
@@ -164,11 +209,12 @@ func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted boo
if listRetracted {
allowed = CheckExclusions
}
- var err error
- m.Versions, err = versions(ctx, m.Path, allowed)
+ v, origin, err := versions(ctx, m.Path, allowed)
if err != nil && m.Error == nil {
m.Error = &modinfo.ModuleError{Err: err.Error()}
}
+ m.Versions = v
+ m.Origin = mergeOrigin(m.Origin, origin)
}
// addRetraction fills in m.Retracted if the module was retracted by its author.
@@ -230,7 +276,7 @@ func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) {
// moduleInfo returns information about module m, loaded from the requirements
// in rs (which may be nil to indicate that m was not loaded from a requirement
// graph).
-func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode) *modinfo.ModulePublic {
+func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic {
if m.Version == "" && MainModules.Contains(m.Path) {
info := &modinfo.ModulePublic{
Path: m.Path,
@@ -260,6 +306,15 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li
// completeFromModCache fills in the extra fields in m using the module cache.
completeFromModCache := func(m *modinfo.ModulePublic) {
+ if old := reuse[module.Version{Path: m.Path, Version: m.Version}]; old != nil {
+ if err := checkReuse(ctx, m.Path, old.Origin); err == nil {
+ *m = *old
+ m.Query = ""
+ m.Dir = ""
+ return
+ }
+ }
+
checksumOk := func(suffix string) bool {
return rs == nil || m.Version == "" || cfg.BuildMod == "mod" ||
modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix})
diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go
index 5b8d6051f3..cde4953afa 100644
--- a/src/cmd/go/internal/modload/buildlist.go
+++ b/src/cmd/go/internal/modload/buildlist.go
@@ -397,7 +397,6 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
seen := map[module.Version]bool{}
for _, m := range roots {
hasDepsInAll[m.Path] = true
- seen[m] = true
}
// This loop will terminate because it will call enqueue on each version of
// each dependency of the modules in hasDepsInAll at most once (and only
@@ -406,11 +405,11 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
needsEnqueueing := map[module.Version]bool{}
for p := range hasDepsInAll {
m := module.Version{Path: p, Version: mg.g.Selected(p)}
- reqs, ok := mg.g.RequiredBy(m)
- if !ok {
+ if !seen[m] {
needsEnqueueing[m] = true
continue
}
+ reqs, _ := mg.g.RequiredBy(m)
for _, r := range reqs {
s := module.Version{Path: r.Path, Version: mg.g.Selected(r.Path)}
if cmpVersion(s.Version, r.Version) > 0 && !seen[s] {
diff --git a/src/cmd/go/internal/modload/edit.go b/src/cmd/go/internal/modload/edit.go
index c556664c35..f6937a48b4 100644
--- a/src/cmd/go/internal/modload/edit.go
+++ b/src/cmd/go/internal/modload/edit.go
@@ -509,7 +509,7 @@ func (l *versionLimiter) UpgradeToward(ctx context.Context, m module.Version) er
}
if l.check(m, l.pruning).isDisqualified() {
- candidates, err := versions(ctx, m.Path, CheckAllowed)
+ candidates, _, err := versions(ctx, m.Path, CheckAllowed)
if err != nil {
// This is likely a transient error reaching the repository,
// rather than a permanent error with the retrieved version.
diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go
index f782cd93db..e822d06504 100644
--- a/src/cmd/go/internal/modload/list.go
+++ b/src/cmd/go/internal/modload/list.go
@@ -5,15 +5,19 @@
package modload
import (
+ "bytes"
"context"
+ "encoding/json"
"errors"
"fmt"
+ "io"
"os"
"runtime"
"strings"
"cmd/go/internal/base"
"cmd/go/internal/cfg"
+ "cmd/go/internal/modfetch/codehost"
"cmd/go/internal/modinfo"
"cmd/go/internal/search"
@@ -34,13 +38,44 @@ const (
// along with any error preventing additional matches from being identified.
//
// The returned slice can be nonempty even if the error is non-nil.
-func ListModules(ctx context.Context, args []string, mode ListMode) ([]*modinfo.ModulePublic, error) {
- rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode)
+func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile string) ([]*modinfo.ModulePublic, error) {
+ var reuse map[module.Version]*modinfo.ModulePublic
+ if reuseFile != "" {
+ data, err := os.ReadFile(reuseFile)
+ if err != nil {
+ return nil, err
+ }
+ dec := json.NewDecoder(bytes.NewReader(data))
+ reuse = make(map[module.Version]*modinfo.ModulePublic)
+ for {
+ var m modinfo.ModulePublic
+ if err := dec.Decode(&m); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("parsing %s: %v", reuseFile, err)
+ }
+ if m.Origin == nil || !m.Origin.Checkable() {
+ // Nothing to check to validate reuse.
+ continue
+ }
+ m.Reuse = true
+ reuse[module.Version{Path: m.Path, Version: m.Version}] = &m
+ if m.Query != "" {
+ reuse[module.Version{Path: m.Path, Version: m.Query}] = &m
+ }
+ }
+ }
+
+ rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode, reuse)
type token struct{}
sem := make(chan token, runtime.GOMAXPROCS(0))
if mode != 0 {
for _, m := range mods {
+ if m.Reuse {
+ continue
+ }
add := func(m *modinfo.ModulePublic) {
sem <- token{}
go func() {
@@ -80,11 +115,11 @@ func ListModules(ctx context.Context, args []string, mode ListMode) ([]*modinfo.
return mods, err
}
-func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) {
+func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) {
if len(args) == 0 {
var ms []*modinfo.ModulePublic
for _, m := range MainModules.Versions() {
- ms = append(ms, moduleInfo(ctx, rs, m, mode))
+ ms = append(ms, moduleInfo(ctx, rs, m, mode, reuse))
}
return rs, ms, nil
}
@@ -157,12 +192,17 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
// specific revision or used 'go list -retracted'.
allowed = nil
}
- info, err := Query(ctx, path, vers, current, allowed)
+ info, err := queryReuse(ctx, path, vers, current, allowed, reuse)
if err != nil {
+ var origin *codehost.Origin
+ if info != nil {
+ origin = info.Origin
+ }
mods = append(mods, &modinfo.ModulePublic{
Path: path,
Version: vers,
Error: modinfoError(path, vers, err),
+ Origin: origin,
})
continue
}
@@ -171,7 +211,11 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
// *Requirements instead.
var noRS *Requirements
- mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode)
+ mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse)
+ if vers != mod.Version {
+ mod.Query = vers
+ }
+ mod.Origin = info.Origin
mods = append(mods, mod)
continue
}
@@ -200,7 +244,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
continue
}
if v != "none" {
- mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode))
+ mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse))
} else if cfg.BuildMod == "vendor" {
// In vendor mode, we can't determine whether a missing module is “a
// known dependency” because the module graph is incomplete.
@@ -229,7 +273,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
matched = true
if !matchedModule[m] {
matchedModule[m] = true
- mods = append(mods, moduleInfo(ctx, rs, m, mode))
+ mods = append(mods, moduleInfo(ctx, rs, m, mode, reuse))
}
}
}
diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go
index 588bcf4bdc..ea1c21b4f1 100644
--- a/src/cmd/go/internal/modload/mvs.go
+++ b/src/cmd/go/internal/modload/mvs.go
@@ -11,6 +11,7 @@ import (
"sort"
"cmd/go/internal/modfetch"
+ "cmd/go/internal/modfetch/codehost"
"golang.org/x/mod/module"
"golang.org/x/mod/semver"
@@ -78,11 +79,10 @@ func (*mvsReqs) Upgrade(m module.Version) (module.Version, error) {
return m, nil
}
-func versions(ctx context.Context, path string, allowed AllowedFunc) ([]string, error) {
+func versions(ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) {
// Note: modfetch.Lookup and repo.Versions are cached,
// so there's no need for us to add extra caching here.
- var versions []string
- err := modfetch.TryProxies(func(proxy string) error {
+ err = modfetch.TryProxies(func(proxy string) error {
repo, err := lookupRepo(proxy, path)
if err != nil {
return err
@@ -91,8 +91,8 @@ func versions(ctx context.Context, path string, allowed AllowedFunc) ([]string,
if err != nil {
return err
}
- allowedVersions := make([]string, 0, len(allVersions))
- for _, v := range allVersions {
+ allowedVersions := make([]string, 0, len(allVersions.List))
+ for _, v := range allVersions.List {
if err := allowed(ctx, module.Version{Path: path, Version: v}); err == nil {
allowedVersions = append(allowedVersions, v)
} else if !errors.Is(err, ErrDisallowed) {
@@ -100,9 +100,10 @@ func versions(ctx context.Context, path string, allowed AllowedFunc) ([]string,
}
}
versions = allowedVersions
+ origin = allVersions.Origin
return nil
})
- return versions, err
+ return versions, origin, err
}
// previousVersion returns the tagged version of m.Path immediately prior to
@@ -117,7 +118,7 @@ func previousVersion(m module.Version) (module.Version, error) {
return module.Version{Path: m.Path, Version: "none"}, nil
}
- list, err := versions(context.TODO(), m.Path, CheckAllowed)
+ list, _, err := versions(context.TODO(), m.Path, CheckAllowed)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return module.Version{Path: m.Path, Version: "none"}, nil
diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go
index ae5304f87e..01df14fca4 100644
--- a/src/cmd/go/internal/modload/query.go
+++ b/src/cmd/go/internal/modload/query.go
@@ -20,6 +20,8 @@ import (
"cmd/go/internal/cfg"
"cmd/go/internal/imports"
"cmd/go/internal/modfetch"
+ "cmd/go/internal/modfetch/codehost"
+ "cmd/go/internal/modinfo"
"cmd/go/internal/search"
"cmd/go/internal/str"
"cmd/go/internal/trace"
@@ -72,18 +74,39 @@ import (
//
// If path is the path of the main module and the query is "latest",
// Query returns Target.Version as the version.
+//
+// Query often returns a non-nil *RevInfo with a non-nil error,
+// to provide an info.Origin that can allow the error to be cached.
func Query(ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) {
ctx, span := trace.StartSpan(ctx, "modload.Query "+path)
defer span.Done()
+ return queryReuse(ctx, path, query, current, allowed, nil)
+}
+
+// queryReuse is like Query but also takes a map of module info that can be reused
+// if the validation criteria in Origin are met.
+func queryReuse(ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
var info *modfetch.RevInfo
err := modfetch.TryProxies(func(proxy string) (err error) {
- info, err = queryProxy(ctx, proxy, path, query, current, allowed)
+ info, err = queryProxy(ctx, proxy, path, query, current, allowed, reuse)
return err
})
return info, err
}
+// checkReuse checks whether a revision of a given module or a version list
+// for a given module may be reused, according to the information in origin.
+func checkReuse(ctx context.Context, path string, old *codehost.Origin) error {
+ return modfetch.TryProxies(func(proxy string) error {
+ repo, err := lookupRepo(proxy, path)
+ if err != nil {
+ return err
+ }
+ return repo.CheckReuse(old)
+ })
+}
+
// AllowedFunc is used by Query and other functions to filter out unsuitable
// versions, for example, those listed in exclude directives in the main
// module's go.mod file.
@@ -106,7 +129,7 @@ func (queryDisabledError) Error() string {
return fmt.Sprintf("cannot query module due to -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason)
}
-func queryProxy(ctx context.Context, proxy, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) {
+func queryProxy(ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
ctx, span := trace.StartSpan(ctx, "modload.queryProxy "+path+" "+query)
defer span.Done()
@@ -137,6 +160,19 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
return nil, err
}
+ if old := reuse[module.Version{Path: path, Version: query}]; old != nil {
+ if err := repo.CheckReuse(old.Origin); err == nil {
+ info := &modfetch.RevInfo{
+ Version: old.Version,
+ Origin: old.Origin,
+ }
+ if old.Time != nil {
+ info.Time = *old.Time
+ }
+ return info, nil
+ }
+ }
+
// Parse query to detect parse errors (and possibly handle query)
// before any network I/O.
qm, err := newQueryMatcher(path, query, current, allowed)
@@ -161,7 +197,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
}
}
if err != nil {
- return nil, queryErr
+ return info, queryErr
}
}
if err := allowed(ctx, module.Version{Path: path, Version: info.Version}); errors.Is(err, ErrDisallowed) {
@@ -177,15 +213,23 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
if err != nil {
return nil, err
}
- releases, prereleases, err := qm.filterVersions(ctx, versions)
+ revErr := &modfetch.RevInfo{Origin: versions.Origin} // RevInfo to return with error
+
+ releases, prereleases, err := qm.filterVersions(ctx, versions.List)
if err != nil {
- return nil, err
+ return revErr, err
}
lookup := func(v string) (*modfetch.RevInfo, error) {
rev, err := repo.Stat(v)
+ // Stat can return a non-nil rev and a non-nil err,
+ // in order to provide origin information to make the error cacheable.
+ if rev == nil && err != nil {
+ return revErr, err
+ }
+ rev.Origin = mergeOrigin(rev.Origin, versions.Origin)
if err != nil {
- return nil, err
+ return rev, err
}
if (query == "upgrade" || query == "patch") && module.IsPseudoVersion(current) && !rev.Time.IsZero() {
@@ -210,9 +254,14 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
currentTime, err := module.PseudoVersionTime(current)
if err == nil && rev.Time.Before(currentTime) {
if err := allowed(ctx, module.Version{Path: path, Version: current}); errors.Is(err, ErrDisallowed) {
- return nil, err
+ return revErr, err
+ }
+ info, err := repo.Stat(current)
+ if info == nil && err != nil {
+ return revErr, err
}
- return repo.Stat(current)
+ info.Origin = mergeOrigin(info.Origin, versions.Origin)
+ return info, err
}
}
@@ -242,7 +291,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
return lookup(latest.Version)
}
} else if !errors.Is(err, fs.ErrNotExist) {
- return nil, err
+ return revErr, err
}
}
@@ -254,7 +303,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
return lookup(current)
}
- return nil, &NoMatchingVersionError{query: query, current: current}
+ return revErr, &NoMatchingVersionError{query: query, current: current}
}
// IsRevisionQuery returns true if vers is a version query that may refer to
@@ -663,7 +712,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin
pathCurrent := current(path)
r.Mod.Path = path
- r.Rev, err = queryProxy(ctx, proxy, path, query, pathCurrent, allowed)
+ r.Rev, err = queryProxy(ctx, proxy, path, query, pathCurrent, allowed, nil)
if err != nil {
return r, err
}
@@ -991,7 +1040,8 @@ func versionHasGoMod(_ context.Context, m module.Version) (bool, error) {
// available versions, but cannot fetch specific source files.
type versionRepo interface {
ModulePath() string
- Versions(prefix string) ([]string, error)
+ CheckReuse(*codehost.Origin) error
+ Versions(prefix string) (*modfetch.Versions, error)
Stat(rev string) (*modfetch.RevInfo, error)
Latest() (*modfetch.RevInfo, error)
}
@@ -1023,8 +1073,13 @@ type emptyRepo struct {
var _ versionRepo = emptyRepo{}
-func (er emptyRepo) ModulePath() string { return er.path }
-func (er emptyRepo) Versions(prefix string) ([]string, error) { return nil, nil }
+func (er emptyRepo) ModulePath() string { return er.path }
+func (er emptyRepo) CheckReuse(old *codehost.Origin) error {
+ return fmt.Errorf("empty repo")
+}
+func (er emptyRepo) Versions(prefix string) (*modfetch.Versions, error) {
+ return &modfetch.Versions{}, nil
+}
func (er emptyRepo) Stat(rev string) (*modfetch.RevInfo, error) { return nil, er.err }
func (er emptyRepo) Latest() (*modfetch.RevInfo, error) { return nil, er.err }
@@ -1042,15 +1097,22 @@ var _ versionRepo = (*replacementRepo)(nil)
func (rr *replacementRepo) ModulePath() string { return rr.repo.ModulePath() }
+func (rr *replacementRepo) CheckReuse(old *codehost.Origin) error {
+ return fmt.Errorf("replacement repo")
+}
+
// Versions returns the versions from rr.repo augmented with any matching
// replacement versions.
-func (rr *replacementRepo) Versions(prefix string) ([]string, error) {
+func (rr *replacementRepo) Versions(prefix string) (*modfetch.Versions, error) {
repoVersions, err := rr.repo.Versions(prefix)
- if err != nil && !errors.Is(err, os.ErrNotExist) {
- return nil, err
+ if err != nil {
+ if !errors.Is(err, os.ErrNotExist) {
+ return nil, err
+ }
+ repoVersions = new(modfetch.Versions)
}
- versions := repoVersions
+ versions := repoVersions.List
for _, mm := range MainModules.Versions() {
if index := MainModules.Index(mm); index != nil && len(index.replace) > 0 {
path := rr.ModulePath()
@@ -1062,15 +1124,15 @@ func (rr *replacementRepo) Versions(prefix string) ([]string, error) {
}
}
- if len(versions) == len(repoVersions) { // No replacement versions added.
- return versions, nil
+ if len(versions) == len(repoVersions.List) { // No replacement versions added.
+ return repoVersions, nil
}
sort.Slice(versions, func(i, j int) bool {
return semver.Compare(versions[i], versions[j]) < 0
})
str.Uniq(&versions)
- return versions, nil
+ return &modfetch.Versions{List: versions}, nil
}
func (rr *replacementRepo) Stat(rev string) (*modfetch.RevInfo, error) {
diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go
index 856390a0f2..b2ac7f22b1 100644
--- a/src/cmd/go/internal/modload/search.go
+++ b/src/cmd/go/internal/modload/search.go
@@ -216,21 +216,20 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f
// is the module's root directory on disk, index is the modindex.Module for the
// module, and importPathRoot is the module's path prefix.
func walkFromIndex(index *modindex.Module, importPathRoot string, isMatch, treeCanMatch func(string) bool, tags, have map[string]bool, addPkg func(string)) {
-loopPackages:
- for _, reldir := range index.Packages() {
+ index.Walk(func(reldir string) {
// Avoid .foo, _foo, and testdata subdirectory trees.
p := reldir
for {
elem, rest, found := strings.Cut(p, string(filepath.Separator))
if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
- continue loopPackages
+ return
}
if found && elem == "vendor" {
// Ignore this path if it contains the element "vendor" anywhere
// except for the last element (packages named vendor are allowed
// for historical reasons). Note that found is true when this
// isn't the last path element.
- continue loopPackages
+ return
}
if !found {
// Didn't find the separator, so we're considering the last element.
@@ -241,12 +240,12 @@ loopPackages:
// Don't use GOROOT/src.
if reldir == "" && importPathRoot == "" {
- continue
+ return
}
name := path.Join(importPathRoot, filepath.ToSlash(reldir))
if !treeCanMatch(name) {
- continue
+ return
}
if !have[name] {
@@ -257,7 +256,7 @@ loopPackages:
}
}
}
- }
+ })
}
// MatchInModule identifies the packages matching the given pattern within the
diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go
index feb82d8d38..42745d9928 100644
--- a/src/cmd/go/internal/work/build.go
+++ b/src/cmd/go/internal/work/build.go
@@ -151,11 +151,10 @@ and test commands:
For example, when building with a non-standard configuration,
use -pkgdir to keep generated packages in a separate location.
-tags tag,list
- a comma-separated list of build tags to consider satisfied during the
- build. For more information about build tags, see the description of
- build constraints in the documentation for the go/build package.
- (Earlier versions of Go used a space-separated list, and that form
- is deprecated but still recognized.)
+ a comma-separated list of additional build tags to consider satisfied
+ during the build. For more information about build tags, see
+ 'go help buildconstraint'. (Earlier versions of Go used a
+ space-separated list, and that form is deprecated but still recognized.)
-trimpath
remove all file system paths from the resulting executable.
Instead of absolute file system paths, the recorded file names
diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go
index 5bf548db32..255ff3a0c5 100644
--- a/src/cmd/go/internal/work/init.go
+++ b/src/cmd/go/internal/work/init.go
@@ -211,7 +211,11 @@ func buildModeInit() {
codegenArg = "-shared"
ldBuildmode = "pie"
case "windows":
- ldBuildmode = "pie"
+ if cfg.BuildRace {
+ ldBuildmode = "exe"
+ } else {
+ ldBuildmode = "pie"
+ }
case "ios":
codegenArg = "-shared"
ldBuildmode = "pie"
diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go
index 5e82929f19..809dfb452f 100644
--- a/src/cmd/go/script_test.go
+++ b/src/cmd/go/script_test.go
@@ -163,7 +163,7 @@ func (ts *testScript) setup() {
ts.cd = filepath.Join(ts.workdir, "gopath/src")
ts.env = []string{
"WORK=" + ts.workdir, // must be first for ts.abbrev
- "PATH=" + testBin + string(filepath.ListSeparator) + os.Getenv("PATH"),
+ pathEnvName() + "=" + testBin + string(filepath.ListSeparator) + os.Getenv(pathEnvName()),
homeEnvName() + "=/no-home",
"CCACHE_DISABLE=1", // ccache breaks with non-existent HOME
"GOARCH=" + runtime.GOARCH,
@@ -187,8 +187,6 @@ func (ts *testScript) setup() {
tempEnvName() + "=" + filepath.Join(ts.workdir, "tmp"),
"devnull=" + os.DevNull,
"goversion=" + goVersion(ts),
- ":=" + string(os.PathListSeparator),
- "/=" + string(os.PathSeparator),
"CMDGO_TEST_RUN_MAIN=true",
}
if testenv.Builder() != "" || os.Getenv("GIT_TRACE_CURL") == "1" {
@@ -203,10 +201,6 @@ func (ts *testScript) setup() {
ts.env = append(ts.env, "TESTGONETWORK=panic", "TESTGOVCS=panic")
}
- if runtime.GOOS == "plan9" {
- ts.env = append(ts.env, "path="+testBin+string(filepath.ListSeparator)+os.Getenv("path"))
- }
-
for _, key := range extraEnvKeys {
if val := os.Getenv(key); val != "" {
ts.env = append(ts.env, key+"="+val)
@@ -219,6 +213,10 @@ func (ts *testScript) setup() {
ts.envMap[kv[:i]] = kv[i+1:]
}
}
+ // Add entries for ${:} and ${/} to make it easier to write platform-independent
+ // environment variables.
+ ts.envMap["/"] = string(os.PathSeparator)
+ ts.envMap[":"] = string(os.PathListSeparator)
fmt.Fprintf(&ts.log, "# (%s)\n", time.Now().UTC().Format(time.RFC3339))
ts.mark = ts.log.Len()
@@ -1264,12 +1262,7 @@ func (ts *testScript) lookPath(command string) (string, error) {
}
}
- pathName := "PATH"
- if runtime.GOOS == "plan9" {
- pathName = "path"
- }
-
- for _, dir := range strings.Split(ts.envMap[pathName], string(filepath.ListSeparator)) {
+ for _, dir := range strings.Split(ts.envMap[pathEnvName()], string(filepath.ListSeparator)) {
if searchExt {
ents, err := os.ReadDir(dir)
if err != nil {
diff --git a/src/cmd/go/testdata/mod/example.com_retract_noupgrade_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_retract_noupgrade_v1.0.0.txt
new file mode 100644
index 0000000000..466afc5765
--- /dev/null
+++ b/src/cmd/go/testdata/mod/example.com_retract_noupgrade_v1.0.0.txt
@@ -0,0 +1,9 @@
+-- .mod --
+module example.com/retract/noupgrade
+
+go 1.19
+
+retract v1.0.0 // bad
+
+-- .info --
+{"Version":"v1.0.0"}
diff --git a/src/cmd/go/testdata/script/README b/src/cmd/go/testdata/script/README
index c575bff1a5..e52917684f 100644
--- a/src/cmd/go/testdata/script/README
+++ b/src/cmd/go/testdata/script/README
@@ -41,12 +41,19 @@ Scripts also have access to these other environment variables:
GODEBUG=<actual GODEBUG>
devnull=<value of os.DevNull>
goversion=<current Go version; for example, 1.12>
- :=<OS-specific path list separator>
-The scripts' supporting files are unpacked relative to $GOPATH/src (aka $WORK/gopath/src)
-and then the script begins execution in that directory as well. Thus the example above runs
-in $WORK/gopath/src with GOPATH=$WORK/gopath and $WORK/gopath/src/hello.go
-containing the listed contents.
+On Plan 9, the variables $path and $home are set instead of $PATH and $HOME.
+On Windows, the variables $USERPROFILE and $TMP are set instead of
+$HOME and $TMPDIR.
+
+In addition, variables named ':' and '/' are expanded within script arguments
+(expanding to the value of os.PathListSeparator and os.PathSeparator
+respectively) but are not inherited in subprocess environments.
+
+The scripts' supporting files are unpacked relative to $GOPATH/src
+(aka $WORK/gopath/src) and then the script begins execution in that directory as
+well. Thus the example above runs in $WORK/gopath/src with GOPATH=$WORK/gopath
+and $WORK/gopath/src/hello.go containing the listed contents.
The lines at the top of the script are a sequence of commands to be executed
by a tiny script engine in ../../script_test.go (not the system shell).
diff --git a/src/cmd/go/testdata/script/build_buildvcs_auto.txt b/src/cmd/go/testdata/script/build_buildvcs_auto.txt
index 9eac568045..dd9eef5f82 100644
--- a/src/cmd/go/testdata/script/build_buildvcs_auto.txt
+++ b/src/cmd/go/testdata/script/build_buildvcs_auto.txt
@@ -6,11 +6,15 @@
cd sub
exec git init .
+exec git config user.name 'Nameless Gopher'
+exec git config user.email 'nobody@golang.org'
exec git add sub.go
exec git commit -m 'initial state'
cd ..
exec git init
+exec git config user.name 'Nameless Gopher'
+exec git config user.email 'nobody@golang.org'
exec git submodule add ./sub
exec git add go.mod example.go
exec git commit -m 'initial state'
diff --git a/src/cmd/go/testdata/script/build_overlay.txt b/src/cmd/go/testdata/script/build_overlay.txt
index 56e812f44b..70cd7f8c7a 100644
--- a/src/cmd/go/testdata/script/build_overlay.txt
+++ b/src/cmd/go/testdata/script/build_overlay.txt
@@ -47,20 +47,24 @@ go build -overlay overlay.json -o main_call_asm$GOEXE ./call_asm
exec ./main_call_asm$GOEXE
! stdout .
-# Change the contents of a file in the overlay and ensure that makes the target stale
-go install -overlay overlay.json ./test_cache
-go list -overlay overlay.json -f '{{.Stale}}' ./test_cache
-stdout '^false$'
-cp overlay/test_cache_different.go overlay/test_cache.go
-go list -overlay overlay.json -f '{{.Stale}}' ./test_cache
-stdout '^true$'
-
[cgo] go list -compiled -overlay overlay.json -f '{{range .CompiledGoFiles}}{{. | printf "%s\n"}}{{end}}' ./cgo_hello_replace
[cgo] cp stdout compiled_cgo_sources.txt
[cgo] go run ../print_line_comments.go compiled_cgo_sources.txt
[cgo] stdout $GOPATH[/\\]src[/\\]m[/\\]cgo_hello_replace[/\\]cgo_hello_replace.go
[cgo] ! stdout $GOPATH[/\\]src[/\\]m[/\\]overlay[/\\]hello.c
+# Change the contents of a file in the overlay and ensure that makes the target stale
+env OLD_GOCACHE=$GOCACHE
+env GOCACHE=$WORK/cache # use a fresh cache so that multiple runs of the test don't interfere
+go build -x -overlay overlay.json ./test_cache
+stderr '(compile|gccgo)( |\.exe).*test_cache.go'
+go build -x -overlay overlay.json ./test_cache
+! stderr '(compile|gccgo)( |\.exe).*test_cache.go' # cached
+cp overlay/test_cache_different.go overlay/test_cache.go
+go build -x -overlay overlay.json ./test_cache
+stderr '(compile|gccgo)( |\.exe).*test_cache.go' # not cached
+env GOCACHE=$OLD_GOCACHE
+
# Run same tests but with gccgo.
env GO111MODULE=off
[!exec:gccgo] stop
diff --git a/src/cmd/go/testdata/script/fsys_walk.txt b/src/cmd/go/testdata/script/fsys_walk.txt
new file mode 100644
index 0000000000..9d1a9451ff
--- /dev/null
+++ b/src/cmd/go/testdata/script/fsys_walk.txt
@@ -0,0 +1,6 @@
+# Test that go list prefix... does not read directories not beginning with prefix.
+env GODEBUG=gofsystrace=1
+go list m...
+stderr mime
+stderr mime[\\/]multipart
+! stderr archive
diff --git a/src/cmd/go/testdata/script/get_issue16471.txt b/src/cmd/go/testdata/script/get_issue16471.txt
new file mode 100644
index 0000000000..2a2225a444
--- /dev/null
+++ b/src/cmd/go/testdata/script/get_issue16471.txt
@@ -0,0 +1,22 @@
+[!net] skip
+[!exec:git] skip
+
+env GO111MODULE=off
+
+cd rsc.io/go-get-issue-10952
+
+exec git init
+exec git add foo.go
+exec git config user.name Gopher
+exec git config user.email gopher@golang.org
+exec git commit -a -m 'initial commit'
+exec git remote add origin https://github.com/golang/go-get-issue-10952
+
+exec git status
+
+! go get -x -u rsc.io/go-get-issue-10952
+stderr '^package rsc.io/go-get-issue-10952: rsc\.io/go-get-issue-10952 is a custom import path for https://github.com/rsc/go-get-issue-10952, but .* is checked out from https://github.com/golang/go-get-issue-10952$'
+
+-- rsc.io/go-get-issue-10952/foo.go --
+// Junk package to test go get.
+package foo
diff --git a/src/cmd/go/testdata/script/index.txt b/src/cmd/go/testdata/script/index.txt
new file mode 100644
index 0000000000..6a2d13c8b5
--- /dev/null
+++ b/src/cmd/go/testdata/script/index.txt
@@ -0,0 +1,6 @@
+# Check that standard library packages are cached.
+go list -json math # refresh cache
+env GODEBUG=gofsystrace=1,gofsystracelog=fsys.log
+go list -json math
+! grep math/abs.go fsys.log
+grep 'openIndexPackage .*[\\/]math$' fsys.log
diff --git a/src/cmd/go/testdata/script/list_permissions.txt b/src/cmd/go/testdata/script/list_perm.txt
index f65896ca14..3b850ef3cc 100644
--- a/src/cmd/go/testdata/script/list_permissions.txt
+++ b/src/cmd/go/testdata/script/list_perm.txt
@@ -11,12 +11,11 @@ stdout '^example.com/noread$'
go list ./empty/...
stderr 'matched no packages'
-[root] stop # Root typically ignores file permissions.
-
# Make the directory ./noread unreadable, and verify that 'go list' reports an
# explicit error for a pattern that should match it (rather than treating it as
# equivalent to an empty directory).
+[root] stop # Root typically ignores file permissions.
[windows] skip # Does not have Unix-style directory permissions.
[plan9] skip # Might not have Unix-style directory permissions.
diff --git a/src/cmd/go/testdata/script/mod_download_issue51114.txt b/src/cmd/go/testdata/script/mod_download_issue51114.txt
new file mode 100644
index 0000000000..92479c6dd3
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_download_issue51114.txt
@@ -0,0 +1,21 @@
+[short] skip
+[!exec:git] skip
+[!net] skip
+[!linux] skip # Uses XDG_CONFIG_HOME
+
+env GIT_CONFIG_GLOBAL=$WORK/.gitconfig
+env GOPROXY=direct
+
+! go mod download
+stderr '^go: github\.com/golang/notexist/subdir@v0.1.0: reading github\.com/golang/notexist/subdir/go\.mod at revision subdir/v0\.1\.0: '
+
+-- go.mod --
+module test
+
+go 1.18
+
+require github.com/golang/notexist/subdir v0.1.0
+
+-- $WORK/.gitconfig --
+[url "git@github.com:"]
+ insteadOf = https://github.com/
diff --git a/src/cmd/go/testdata/script/mod_download_private_vcs.txt b/src/cmd/go/testdata/script/mod_download_private_vcs.txt
index e126793907..da9fe0290b 100644
--- a/src/cmd/go/testdata/script/mod_download_private_vcs.txt
+++ b/src/cmd/go/testdata/script/mod_download_private_vcs.txt
@@ -22,22 +22,18 @@ stderr '^If this is a private repository, see https://golang.org/doc/faq#git_htt
! stderr 'unknown revision'
! stdout .
-[!linux] stop # Needs XDG_CONFIG_HOME.
[!exec:false] stop
# Test that Git clone errors will be shown to the user instead of a generic
# "unknown revision" error. To do this we want to force git ls-remote to return
# an error we don't already have special handling for. See golang/go#42751.
-#
-# Set XDG_CONFIG_HOME to tell Git where to look for the git config file listed
-# below, which turns on ssh.
-env XDG_CONFIG_HOME=$TMPDIR
+env HOME=$WORK${/}home${/}gopher
env GIT_SSH_COMMAND=false
! go install github.com/golang/nonexist@master
stderr 'fatal: Could not read from remote repository.'
! stderr 'unknown revision'
! stdout .
--- $TMPDIR/git/config --
+-- $WORK/home/gopher/.gitconfig --
[url "git@github.com:"]
- insteadOf = https://github.com/
+ insteadOf = https://github.com/
diff --git a/src/cmd/go/testdata/script/mod_perm.txt b/src/cmd/go/testdata/script/mod_perm.txt
new file mode 100644
index 0000000000..f5382eceaf
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_perm.txt
@@ -0,0 +1,23 @@
+# go list should work in ordinary conditions.
+go list ./...
+! stdout _data
+
+# skip in conditions where chmod 0 may not work.
+# plan9 should be fine, but copied from list_perm.txt unchanged.
+[root] skip
+[windows] skip
+[plan9] skip
+
+# go list should work with unreadable _data directory.
+chmod 0 _data
+go list ./...
+! stdout _data
+
+-- go.mod --
+module m
+
+-- x.go --
+package m
+
+-- _data/x.go --
+package p
diff --git a/src/cmd/go/testdata/script/mod_retract_noupgrade.txt b/src/cmd/go/testdata/script/mod_retract_noupgrade.txt
new file mode 100644
index 0000000000..67de79f42d
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_retract_noupgrade.txt
@@ -0,0 +1,11 @@
+go list -m -u example.com/retract/noupgrade
+stdout '^example.com/retract/noupgrade v1.0.0 \(retracted\)$'
+
+-- go.mod --
+module use
+
+go 1.19
+
+require example.com/retract/noupgrade v1.0.0
+-- go.sum --
+example.com/retract/noupgrade v1.0.0/go.mod h1:q2/HnBejUQ83RcUo4stf2U++/Zr9R/Ky3BsodjKBkQ4=
diff --git a/src/cmd/go/testdata/script/reuse_git.txt b/src/cmd/go/testdata/script/reuse_git.txt
new file mode 100644
index 0000000000..a5a0c8a9a0
--- /dev/null
+++ b/src/cmd/go/testdata/script/reuse_git.txt
@@ -0,0 +1,425 @@
+[short] skip
+[!exec:git] skip
+[!net] skip
+
+env GO111MODULE=on
+env GOPROXY=direct
+env GOSUMDB=off
+
+# go mod download with the pseudo-version should invoke git but not have a TagSum or Ref.
+go mod download -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20170922010558-fc3a09f3dc5c
+stderr 'git fetch'
+cp stdout hellopseudo.json
+! stdout '"(Query|TagPrefix|TagSum|Ref)"'
+stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/hello"'
+stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
+go clean -modcache
+
+# go mod download vcstest/hello should invoke git, print origin info
+go mod download -x -json vcs-test.golang.org/git/hello.git@latest
+stderr 'git fetch'
+cp stdout hello.json
+stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/hello"'
+stdout '"Query": "latest"'
+! stdout '"TagPrefix"'
+stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
+stdout '"Ref": "HEAD"'
+stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
+
+# pseudo-version again should not invoke git fetch (it has the version from the @latest query)
+# but still be careful not to include a TagSum or a Ref, especially not Ref set to HEAD,
+# which is easy to do when reusing the cached version from the @latest query.
+go mod download -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20170922010558-fc3a09f3dc5c
+! stderr 'git fetch'
+cp stdout hellopseudo2.json
+cmp hellopseudo.json hellopseudo2.json
+
+# go mod download vcstest/hello@hash needs to check TagSum to find pseudoversion base.
+go mod download -x -json vcs-test.golang.org/git/hello.git@fc3a09f3dc5c
+! stderr 'git fetch'
+cp stdout hellohash.json
+stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
+stdout '"Query": "fc3a09f3dc5c"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/hello"'
+stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
+stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
+
+# go mod download vcstest/hello/v9 should fail, still print origin info
+! go mod download -x -json vcs-test.golang.org/git/hello.git/v9@latest
+cp stdout hellov9.json
+stdout '"Version": "latest"'
+stdout '"Error":.*no matching versions'
+! stdout '"TagPrefix"'
+stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
+! stdout '"(Ref|Hash|RepoSum)":'
+
+# go mod download vcstest/hello/sub/v9 should also fail, print origin info with TagPrefix
+! go mod download -x -json vcs-test.golang.org/git/hello.git/sub/v9@latest
+cp stdout hellosubv9.json
+stdout '"Version": "latest"'
+stdout '"Error":.*no matching versions'
+stdout '"TagPrefix": "sub/"'
+stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
+! stdout '"(Ref|Hash|RepoSum)":'
+
+# go mod download vcstest/hello@nonexist should fail, still print origin info
+! go mod download -x -json vcs-test.golang.org/git/hello.git@nonexist
+cp stdout hellononexist.json
+stdout '"Version": "nonexist"'
+stdout '"Error":.*unknown revision nonexist'
+stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
+! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
+
+# go mod download vcstest/hello@1234567890123456789012345678901234567890 should fail, still print origin info
+# (40 hex digits is assumed to be a full hash and is a slightly different code path from @nonexist)
+! go mod download -x -json vcs-test.golang.org/git/hello.git@1234567890123456789012345678901234567890
+cp stdout hellononhash.json
+stdout '"Version": "1234567890123456789012345678901234567890"'
+stdout '"Error":.*unknown revision 1234567890123456789012345678901234567890'
+stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
+! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
+
+# go mod download vcstest/hello@v0.0.0-20220101120101-123456789abc should fail, still print origin info
+# (non-existent pseudoversion)
+! go mod download -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20220101120101-123456789abc
+cp stdout hellononpseudo.json
+stdout '"Version": "v0.0.0-20220101120101-123456789abc"'
+stdout '"Error":.*unknown revision 123456789abc'
+stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
+! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
+
+# go mod download vcstest/tagtests should invoke git, print origin info
+go mod download -x -json vcs-test.golang.org/git/tagtests.git@latest
+stderr 'git fetch'
+cp stdout tagtests.json
+stdout '"Version": "v0.2.2"'
+stdout '"Query": "latest"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
+! stdout '"TagPrefix"'
+stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
+stdout '"Ref": "refs/tags/v0.2.2"'
+stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"'
+
+# go mod download vcstest/tagtests@v0.2.2 should print origin info, no TagSum needed
+go mod download -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
+cp stdout tagtestsv022.json
+stdout '"Version": "v0.2.2"'
+! stdout '"Query":'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
+! stdout '"TagPrefix"'
+! stdout '"TagSum"'
+stdout '"Ref": "refs/tags/v0.2.2"'
+stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"'
+
+# go mod download vcstest/tagtests@master needs a TagSum again
+go mod download -x -json vcs-test.golang.org/git/tagtests.git@master
+cp stdout tagtestsmaster.json
+stdout '"Version": "v0.2.3-0.20190509225625-c7818c24fa2f"'
+stdout '"Query": "master"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
+! stdout '"TagPrefix"'
+stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
+stdout '"Ref": "refs/heads/master"'
+stdout '"Hash": "c7818c24fa2f3f714c67d0a6d3e411c85a518d1f"'
+
+# go mod download vcstest/prefixtagtests should invoke git, print origin info
+go mod download -x -json vcs-test.golang.org/git/prefixtagtests.git/sub@latest
+stderr 'git fetch'
+cp stdout prefixtagtests.json
+stdout '"Version": "v0.0.10"'
+stdout '"Query": "latest"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/prefixtagtests"'
+stdout '"Subdir": "sub"'
+stdout '"TagPrefix": "sub/"'
+stdout '"TagSum": "t1:YGSbWkJ8dn9ORAr[+]BlKHFK/2ZhXLb9hVuYfTZ9D8C7g="'
+stdout '"Ref": "refs/tags/sub/v0.0.10"'
+stdout '"Hash": "2b7c4692e12c109263cab51b416fcc835ddd7eae"'
+
+# go mod download of a bunch of these should fail (some are invalid) but write good JSON for later
+! go mod download -json vcs-test.golang.org/git/hello.git@latest vcs-test.golang.org/git/hello.git/v9@latest vcs-test.golang.org/git/hello.git/sub/v9@latest vcs-test.golang.org/git/tagtests.git@latest vcs-test.golang.org/git/tagtests.git@v0.2.2 vcs-test.golang.org/git/tagtests.git@master
+cp stdout all.json
+
+# clean the module cache, make sure that makes go mod download re-run git fetch, clean again
+go clean -modcache
+go mod download -x -json vcs-test.golang.org/git/hello.git@latest
+stderr 'git fetch'
+go clean -modcache
+
+# reuse go mod download vcstest/hello result
+go mod download -reuse=hello.json -x -json vcs-test.golang.org/git/hello.git@latest
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/hello"'
+! stdout '"TagPrefix"'
+stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
+stdout '"Ref": "HEAD"'
+stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
+! stdout '"Dir"'
+! stdout '"Info"'
+! stdout '"GoMod"'
+! stdout '"Zip"'
+
+# reuse go mod download vcstest/hello pseudoversion result
+go mod download -reuse=hellopseudo.json -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20170922010558-fc3a09f3dc5c
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/hello"'
+! stdout '"(Query|TagPrefix|TagSum|Ref)"'
+stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse go mod download vcstest/hello@hash
+go mod download -reuse=hellohash.json -x -json vcs-test.golang.org/git/hello.git@fc3a09f3dc5c
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Query": "fc3a09f3dc5c"'
+stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/hello"'
+! stdout '"(TagPrefix|Ref)"'
+stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
+stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse go mod download vcstest/hello/v9 error result
+! go mod download -reuse=hellov9.json -x -json vcs-test.golang.org/git/hello.git/v9@latest
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Error":.*no matching versions'
+! stdout '"TagPrefix"'
+stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
+! stdout '"(Ref|Hash)":'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse go mod download vcstest/hello/sub/v9 error result
+! go mod download -reuse=hellosubv9.json -x -json vcs-test.golang.org/git/hello.git/sub/v9@latest
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Error":.*no matching versions'
+stdout '"TagPrefix": "sub/"'
+stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
+! stdout '"(Ref|Hash)":'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse go mod download vcstest/hello@nonexist
+! go mod download -reuse=hellononexist.json -x -json vcs-test.golang.org/git/hello.git@nonexist
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Version": "nonexist"'
+stdout '"Error":.*unknown revision nonexist'
+stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
+! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse go mod download vcstest/hello@1234567890123456789012345678901234567890
+! go mod download -reuse=hellononhash.json -x -json vcs-test.golang.org/git/hello.git@1234567890123456789012345678901234567890
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Version": "1234567890123456789012345678901234567890"'
+stdout '"Error":.*unknown revision 1234567890123456789012345678901234567890'
+stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
+! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse go mod download vcstest/hello@v0.0.0-20220101120101-123456789abc
+! go mod download -reuse=hellononpseudo.json -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20220101120101-123456789abc
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Version": "v0.0.0-20220101120101-123456789abc"'
+stdout '"Error":.*unknown revision 123456789abc'
+stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
+! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse go mod download vcstest/tagtests result
+go mod download -reuse=tagtests.json -x -json vcs-test.golang.org/git/tagtests.git@latest
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Version": "v0.2.2"'
+stdout '"Query": "latest"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
+! stdout '"TagPrefix"'
+stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
+stdout '"Ref": "refs/tags/v0.2.2"'
+stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse go mod download vcstest/tagtests@v0.2.2 result
+go mod download -reuse=tagtestsv022.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Version": "v0.2.2"'
+! stdout '"Query":'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
+! stdout '"TagPrefix"'
+! stdout '"TagSum"'
+stdout '"Ref": "refs/tags/v0.2.2"'
+stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse go mod download vcstest/tagtests@master result
+go mod download -reuse=tagtestsmaster.json -x -json vcs-test.golang.org/git/tagtests.git@master
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Version": "v0.2.3-0.20190509225625-c7818c24fa2f"'
+stdout '"Query": "master"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
+! stdout '"TagPrefix"'
+stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
+stdout '"Ref": "refs/heads/master"'
+stdout '"Hash": "c7818c24fa2f3f714c67d0a6d3e411c85a518d1f"'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse go mod download vcstest/tagtests@master result again with all.json
+go mod download -reuse=all.json -x -json vcs-test.golang.org/git/tagtests.git@master
+! stderr 'git fetch'
+stdout '"Reuse": true'
+stdout '"Version": "v0.2.3-0.20190509225625-c7818c24fa2f"'
+stdout '"Query": "master"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
+! stdout '"TagPrefix"'
+stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
+stdout '"Ref": "refs/heads/master"'
+stdout '"Hash": "c7818c24fa2f3f714c67d0a6d3e411c85a518d1f"'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# go mod download vcstest/prefixtagtests result with json
+go mod download -reuse=prefixtagtests.json -x -json vcs-test.golang.org/git/prefixtagtests.git/sub@latest
+! stderr 'git fetch'
+stdout '"Version": "v0.0.10"'
+stdout '"Query": "latest"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/prefixtagtests"'
+stdout '"Subdir": "sub"'
+stdout '"TagPrefix": "sub/"'
+stdout '"TagSum": "t1:YGSbWkJ8dn9ORAr[+]BlKHFK/2ZhXLb9hVuYfTZ9D8C7g="'
+stdout '"Ref": "refs/tags/sub/v0.0.10"'
+stdout '"Hash": "2b7c4692e12c109263cab51b416fcc835ddd7eae"'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse the bulk results with all.json
+! go mod download -reuse=all.json -json vcs-test.golang.org/git/hello.git@latest vcs-test.golang.org/git/hello.git/v9@latest vcs-test.golang.org/git/hello.git/sub/v9@latest vcs-test.golang.org/git/tagtests.git@latest vcs-test.golang.org/git/tagtests.git@v0.2.2 vcs-test.golang.org/git/tagtests.git@master
+! stderr 'git fetch'
+stdout '"Reuse": true'
+! stdout '"(Dir|Info|GoMod|Zip)"'
+
+# reuse attempt with stale hash should reinvoke git, not report reuse
+go mod download -reuse=tagtestsv022badhash.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
+stderr 'git fetch'
+! stdout '"Reuse": true'
+stdout '"Version": "v0.2.2"'
+! stdout '"Query"'
+stdout '"VCS": "git"'
+stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
+! stdout '"(TagPrefix|TagSum)"'
+stdout '"Ref": "refs/tags/v0.2.2"'
+stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"'
+stdout '"Dir"'
+stdout '"Info"'
+stdout '"GoMod"'
+stdout '"Zip"'
+
+# reuse with stale repo URL
+go mod download -reuse=tagtestsv022badurl.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
+! stdout '"Reuse": true'
+stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
+stdout '"Dir"'
+stdout '"Info"'
+stdout '"GoMod"'
+stdout '"Zip"'
+
+# reuse with stale VCS
+go mod download -reuse=tagtestsv022badvcs.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
+! stdout '"Reuse": true'
+stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
+
+# reuse with stale Dir
+go mod download -reuse=tagtestsv022baddir.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
+! stdout '"Reuse": true'
+stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
+
+# reuse with stale TagSum
+go mod download -reuse=tagtestsbadtagsum.json -x -json vcs-test.golang.org/git/tagtests.git@latest
+! stdout '"Reuse": true'
+stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
+
+-- tagtestsv022badhash.json --
+{
+ "Path": "vcs-test.golang.org/git/tagtests.git",
+ "Version": "v0.2.2",
+ "Origin": {
+ "VCS": "git",
+ "URL": "https://vcs-test.golang.org/git/tagtests",
+ "Ref": "refs/tags/v0.2.2",
+ "Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952XXX"
+ }
+}
+
+-- tagtestsbadtagsum.json --
+{
+ "Path": "vcs-test.golang.org/git/tagtests.git",
+ "Version": "v0.2.2",
+ "Query": "latest",
+ "Origin": {
+ "VCS": "git",
+ "URL": "https://vcs-test.golang.org/git/tagtests",
+ "TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo=XXX",
+ "Ref": "refs/tags/v0.2.2",
+ "Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"
+ },
+ "Reuse": true
+}
+
+-- tagtestsv022badvcs.json --
+{
+ "Path": "vcs-test.golang.org/git/tagtests.git",
+ "Version": "v0.2.2",
+ "Origin": {
+ "VCS": "gitXXX",
+ "URL": "https://vcs-test.golang.org/git/tagtests",
+ "Ref": "refs/tags/v0.2.2",
+ "Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"
+ }
+}
+
+-- tagtestsv022baddir.json --
+{
+ "Path": "vcs-test.golang.org/git/tagtests.git",
+ "Version": "v0.2.2",
+ "Origin": {
+ "VCS": "git",
+ "URL": "https://vcs-test.golang.org/git/tagtests",
+ "Subdir": "subdir",
+ "Ref": "refs/tags/v0.2.2",
+ "Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"
+ }
+}
+
+-- tagtestsv022badurl.json --
+{
+ "Path": "vcs-test.golang.org/git/tagtests.git",
+ "Version": "v0.2.2",
+ "Origin": {
+ "VCS": "git",
+ "URL": "https://vcs-test.golang.org/git/tagtestsXXX",
+ "Ref": "refs/tags/v0.2.2",
+ "Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"
+ }
+}
diff --git a/src/cmd/go/testdata/script/test_fuzz_cache.txt b/src/cmd/go/testdata/script/test_fuzz_cache.txt
index 552966b06b..19fb764add 100644
--- a/src/cmd/go/testdata/script/test_fuzz_cache.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_cache.txt
@@ -17,15 +17,12 @@ go clean -cache
exists $GOCACHE/fuzz
# 'go clean -fuzzcache' should delete the fuzz cache but not the build cache.
-go list -f {{.Stale}} ./empty
-stdout true
-go install ./empty
-go list -f {{.Stale}} ./empty
-stdout false
+go build -x ./empty
+stderr '(compile|gccgo)( |\.exe).*empty.go'
go clean -fuzzcache
! exists $GOCACHE/fuzz
-go list -f {{.Stale}} ./empty
-stdout false
+go build -x ./empty
+! stderr '(compile|gccgo)( |\.exe).*empty.go'
# Fuzzing indicates that one new interesting value was found with an empty
# corpus, and the total size of the cache is now 1.
diff --git a/src/cmd/go/testdata/script/work_goproxy_off.txt b/src/cmd/go/testdata/script/work_goproxy_off.txt
new file mode 100644
index 0000000000..0a602e3d7b
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_goproxy_off.txt
@@ -0,0 +1,59 @@
+go work init
+go work use . ./sub
+
+# Verify that the go.mod files for both modules in the workspace are tidy,
+# and add missing go.sum entries as needed.
+
+cp go.mod go.mod.orig
+go mod tidy
+cmp go.mod go.mod.orig
+
+cd sub
+cp go.mod go.mod.orig
+go mod tidy
+cmp go.mod go.mod.orig
+cd ..
+
+go list -m all
+stdout '^rsc\.io/quote v1\.5\.1$'
+stdout '^rsc\.io/sampler v1\.3\.1$'
+
+# Now remove the module dependencies from the module cache.
+# Because one module upgrades a transitive dependency needed by another,
+# listing the modules in the workspace should error out.
+
+go clean -modcache
+env GOPROXY=off
+! go list -m all
+stderr '^go: rsc.io/sampler@v1.3.0: module lookup disabled by GOPROXY=off$'
+
+-- example.go --
+package example
+
+import _ "rsc.io/sampler"
+-- go.mod --
+module example
+
+go 1.19
+
+require rsc.io/sampler v1.3.0
+
+require (
+ golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c // indirect
+ rsc.io/testonly v1.0.0 // indirect
+)
+-- sub/go.mod --
+module example/sub
+
+go 1.19
+
+require rsc.io/quote v1.5.1
+
+require (
+ golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c // indirect
+ rsc.io/sampler v1.3.1 // indirect
+)
+-- sub/sub.go --
+package example
+
+import _ "rsc.io/quote"
diff --git a/src/cmd/go/testdata/script/work_why_download_graph.txt b/src/cmd/go/testdata/script/work_why_download_graph.txt
index 7964c914a2..8f1aeddf47 100644
--- a/src/cmd/go/testdata/script/work_why_download_graph.txt
+++ b/src/cmd/go/testdata/script/work_why_download_graph.txt
@@ -7,13 +7,19 @@ exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod
exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip
! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info
! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod
+grep '^rsc\.io/quote v1\.5\.2/go\.mod h1:' go.work.sum
+grep '^rsc\.io/quote v1\.5\.2 h1:' go.work.sum
+go clean -modcache
+rm go.work.sum
go mod download
exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info
exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod
exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip
! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info
! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod
+grep '^rsc\.io/quote v1\.5\.2/go\.mod h1:' go.work.sum
+grep '^rsc\.io/quote v1\.5\.2 h1:' go.work.sum
go mod why rsc.io/quote
stdout '# rsc.io/quote\nexample.com/a\nrsc.io/quote'
@@ -25,8 +31,8 @@ stdout 'example.com/a rsc.io/quote@v1.5.2\nexample.com/b example.com/c@v1.0.0\nr
go 1.18
use (
- ./a
- ./b
+ ./a
+ ./b
)
-- a/go.mod --
go 1.18
diff --git a/src/cmd/internal/notsha256/sha256block_386.s b/src/cmd/internal/notsha256/sha256block_386.s
index 086a0ab25c..f2ba7d7a9b 100644
--- a/src/cmd/internal/notsha256/sha256block_386.s
+++ b/src/cmd/internal/notsha256/sha256block_386.s
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build !purego
+// +build !purego
+
// SHA256 block routine. See sha256block.go for Go equivalent.
//
// The algorithm is detailed in FIPS 180-4:
diff --git a/src/cmd/internal/notsha256/sha256block_amd64.go b/src/cmd/internal/notsha256/sha256block_amd64.go
index 676c4f70d9..27b84a86b1 100644
--- a/src/cmd/internal/notsha256/sha256block_amd64.go
+++ b/src/cmd/internal/notsha256/sha256block_amd64.go
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build !purego
+// +build !purego
+
package notsha256
var useAVX2 = false
diff --git a/src/cmd/internal/notsha256/sha256block_amd64.s b/src/cmd/internal/notsha256/sha256block_amd64.s
index b2ae7c5fc9..36ea74451d 100644
--- a/src/cmd/internal/notsha256/sha256block_amd64.s
+++ b/src/cmd/internal/notsha256/sha256block_amd64.s
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build !purego
+// +build !purego
+
#include "textflag.h"
// SHA256 block routine. See sha256block.go for Go equivalent.
diff --git a/src/cmd/internal/notsha256/sha256block_decl.go b/src/cmd/internal/notsha256/sha256block_decl.go
index 5a822ee479..631f1a4a1b 100644
--- a/src/cmd/internal/notsha256/sha256block_decl.go
+++ b/src/cmd/internal/notsha256/sha256block_decl.go
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build 386 || amd64 || ppc64le || ppc64
+//go:build !purego && (386 || amd64 || ppc64le || ppc64)
+// +build !purego
// +build 386 amd64 ppc64le ppc64
package notsha256
diff --git a/src/cmd/internal/notsha256/sha256block_generic.go b/src/cmd/internal/notsha256/sha256block_generic.go
index 20ae841383..2664722bc2 100644
--- a/src/cmd/internal/notsha256/sha256block_generic.go
+++ b/src/cmd/internal/notsha256/sha256block_generic.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !amd64 && !386 && !ppc64le && !ppc64
-// +build !amd64,!386,!ppc64le,!ppc64
+//go:build purego || (!amd64 && !386 && !ppc64le && !ppc64)
+// +build purego !amd64,!386,!ppc64le,!ppc64
package notsha256
diff --git a/src/cmd/internal/notsha256/sha256block_ppc64x.s b/src/cmd/internal/notsha256/sha256block_ppc64x.s
index 6e0f1d6133..e907d3b71b 100644
--- a/src/cmd/internal/notsha256/sha256block_ppc64x.s
+++ b/src/cmd/internal/notsha256/sha256block_ppc64x.s
@@ -8,7 +8,8 @@
// bootstrap toolchain.
//
-//go:build ppc64 || ppc64le
+//go:build !purego && (ppc64 || ppc64le)
+// +build !purego
// +build ppc64 ppc64le
// Based on CRYPTOGAMS code with the following comment:
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index 565ff9d634..18910ddb85 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -1426,10 +1426,23 @@ func (ctxt *Link) hostlink() {
argv = append(argv, "-Wl,-pagezero_size,4000000")
}
}
+ if *flagRace && ctxt.HeadType == objabi.Hwindows {
+ // Current windows/amd64 race detector tsan support
+ // library can't handle PIE mode (see #53539 for more details).
+ // For now, explicitly disable PIE (since some compilers
+ // default to it) if -race is in effect.
+ argv = addASLRargs(argv, false)
+ }
case BuildModePIE:
switch ctxt.HeadType {
case objabi.Hdarwin, objabi.Haix:
case objabi.Hwindows:
+ if *flagAslr && *flagRace {
+ // Current windows/amd64 race detector tsan support
+ // library can't handle PIE mode (see #53539 for more details).
+			// Disable aslr if -race is in effect.
+ *flagAslr = false
+ }
argv = addASLRargs(argv, *flagAslr)
default:
// ELF.
diff --git a/src/cmd/nm/nm_test.go b/src/cmd/nm/nm_test.go
index 226c2c3bcd..4bc9bf9079 100644
--- a/src/cmd/nm/nm_test.go
+++ b/src/cmd/nm/nm_test.go
@@ -250,23 +250,14 @@ func testGoLib(t *testing.T, iscgo bool) {
t.Fatal(err)
}
- args := []string{"install", "mylib"}
- cmd := exec.Command(testenv.GoToolPath(t), args...)
+ cmd := exec.Command(testenv.GoToolPath(t), "build", "-buildmode=archive", "-o", "mylib.a", ".")
cmd.Dir = libpath
cmd.Env = append(os.Environ(), "GOPATH="+gopath)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("building test lib failed: %s %s", err, out)
}
- pat := filepath.Join(gopath, "pkg", "*", "mylib.a")
- ms, err := filepath.Glob(pat)
- if err != nil {
- t.Fatal(err)
- }
- if len(ms) == 0 {
- t.Fatalf("cannot found paths for pattern %s", pat)
- }
- mylib := ms[0]
+ mylib := filepath.Join(libpath, "mylib.a")
out, err = exec.Command(testnmpath, mylib).CombinedOutput()
if err != nil {
diff --git a/src/cmd/trace/main.go b/src/cmd/trace/main.go
index 11804d0b90..0e4d882c97 100644
--- a/src/cmd/trace/main.go
+++ b/src/cmd/trace/main.go
@@ -247,7 +247,7 @@ p { color: grey85; font-size:85%; }
because it made a system call or tried to acquire a mutex.
Directly underneath each bar, a smaller bar or more commonly a fine
- vertical line indicates an event occuring during its execution.
+ vertical line indicates an event occurring during its execution.
Some of these are related to garbage collection; most indicate that
a goroutine yielded its logical processor but then immediately resumed execution
on the same logical processor. Clicking on the event displays the stack trace
@@ -274,7 +274,7 @@ p { color: grey85; font-size:85%; }
function written in C.
</p>
<p>
- Above the event trace for the first logical processor are
+ Above the event trace for the first logical processor are
traces for various runtime-internal events.
The "GC" bar shows when the garbage collector is running, and in which stage.
diff --git a/src/cmd/trace/trace.go b/src/cmd/trace/trace.go
index 1cabc25ced..e6c4cca72e 100644
--- a/src/cmd/trace/trace.go
+++ b/src/cmd/trace/trace.go
@@ -571,7 +571,7 @@ func generateTrace(params *traceParams, consumer traceConsumer) error {
fname := stk[0].Fn
info.name = fmt.Sprintf("G%v %s", newG, fname)
- info.isSystemG = isSystemGoroutine(fname)
+ info.isSystemG = trace.IsSystemGoroutine(fname)
ctx.gcount++
setGState(ev, newG, gDead, gRunnable)
@@ -1129,12 +1129,6 @@ func (ctx *traceContext) buildBranch(parent frameNode, stk []*trace.Frame) int {
return ctx.buildBranch(node, stk)
}
-func isSystemGoroutine(entryFn string) bool {
- // This mimics runtime.isSystemGoroutine as closely as
- // possible.
- return entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.")
-}
-
// firstTimestamp returns the timestamp of the first event record.
func firstTimestamp() int64 {
res, _ := parseTrace()
diff --git a/src/compress/gzip/gunzip.go b/src/compress/gzip/gunzip.go
index aa6780f847..ba8de97e6a 100644
--- a/src/compress/gzip/gunzip.go
+++ b/src/compress/gzip/gunzip.go
@@ -248,42 +248,40 @@ func (z *Reader) Read(p []byte) (n int, err error) {
return 0, z.err
}
- n, z.err = z.decompressor.Read(p)
- z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
- z.size += uint32(n)
- if z.err != io.EOF {
- // In the normal case we return here.
- return n, z.err
- }
+ for n == 0 {
+ n, z.err = z.decompressor.Read(p)
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
+ z.size += uint32(n)
+ if z.err != io.EOF {
+ // In the normal case we return here.
+ return n, z.err
+ }
- // Finished file; check checksum and size.
- if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
- z.err = noEOF(err)
- return n, z.err
- }
- digest := le.Uint32(z.buf[:4])
- size := le.Uint32(z.buf[4:8])
- if digest != z.digest || size != z.size {
- z.err = ErrChecksum
- return n, z.err
- }
- z.digest, z.size = 0, 0
+ // Finished file; check checksum and size.
+ if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
+ z.err = noEOF(err)
+ return n, z.err
+ }
+ digest := le.Uint32(z.buf[:4])
+ size := le.Uint32(z.buf[4:8])
+ if digest != z.digest || size != z.size {
+ z.err = ErrChecksum
+ return n, z.err
+ }
+ z.digest, z.size = 0, 0
- // File is ok; check if there is another.
- if !z.multistream {
- return n, io.EOF
- }
- z.err = nil // Remove io.EOF
+ // File is ok; check if there is another.
+ if !z.multistream {
+ return n, io.EOF
+ }
+ z.err = nil // Remove io.EOF
- if _, z.err = z.readHeader(); z.err != nil {
- return n, z.err
+ if _, z.err = z.readHeader(); z.err != nil {
+ return n, z.err
+ }
}
- // Read from next file, if necessary.
- if n > 0 {
- return n, nil
- }
- return z.Read(p)
+ return n, nil
}
// Close closes the Reader. It does not close the underlying io.Reader.
diff --git a/src/compress/gzip/gunzip_test.go b/src/compress/gzip/gunzip_test.go
index be69185463..3309ff6195 100644
--- a/src/compress/gzip/gunzip_test.go
+++ b/src/compress/gzip/gunzip_test.go
@@ -569,3 +569,19 @@ func TestTruncatedStreams(t *testing.T) {
}
}
}
+
+func TestCVE202230631(t *testing.T) {
+ var empty = []byte{0x1f, 0x8b, 0x08, 0x00, 0xa7, 0x8f, 0x43, 0x62, 0x00,
+ 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ r := bytes.NewReader(bytes.Repeat(empty, 4e6))
+ z, err := NewReader(r)
+ if err != nil {
+ t.Fatalf("NewReader: got %v, want nil", err)
+ }
+ // Prior to CVE-2022-30631 fix, this would cause an unrecoverable panic due
+ // to stack exhaustion.
+ _, err = z.Read(make([]byte, 10))
+ if err != io.EOF {
+ t.Errorf("Reader.Read: got %v, want %v", err, io.EOF)
+ }
+}
diff --git a/src/crypto/x509/parser.go b/src/crypto/x509/parser.go
index e0e8f6125f..a2d3d80964 100644
--- a/src/crypto/x509/parser.go
+++ b/src/crypto/x509/parser.go
@@ -1008,22 +1008,22 @@ func ParseRevocationList(der []byte) (*RevocationList, error) {
// we can populate RevocationList.Raw, before unwrapping the
// SEQUENCE so it can be operated on
if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed certificate")
+ return nil, errors.New("x509: malformed crl")
}
rl.Raw = input
if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed certificate")
+ return nil, errors.New("x509: malformed crl")
}
var tbs cryptobyte.String
// do the same trick again as above to extract the raw
// bytes for Certificate.RawTBSCertificate
if !input.ReadASN1Element(&tbs, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed tbs certificate")
+ return nil, errors.New("x509: malformed tbs crl")
}
rl.RawTBSRevocationList = tbs
if !tbs.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed tbs certificate")
+ return nil, errors.New("x509: malformed tbs crl")
}
var version int
@@ -1106,13 +1106,10 @@ func ParseRevocationList(der []byte) (*RevocationList, error) {
}
var extensions cryptobyte.String
var present bool
- if !tbs.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.SEQUENCE) {
+ if !certSeq.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.SEQUENCE) {
return nil, errors.New("x509: malformed extensions")
}
if present {
- if !extensions.ReadASN1(&extensions, cryptobyte_asn1.SEQUENCE) {
- return nil, errors.New("x509: malformed extensions")
- }
for !extensions.Empty() {
var extension cryptobyte.String
if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) {
@@ -1148,6 +1145,15 @@ func ParseRevocationList(der []byte) (*RevocationList, error) {
if err != nil {
return nil, err
}
+ if ext.Id.Equal(oidExtensionAuthorityKeyId) {
+ rl.AuthorityKeyId = ext.Value
+ } else if ext.Id.Equal(oidExtensionCRLNumber) {
+ value := cryptobyte.String(ext.Value)
+ rl.Number = new(big.Int)
+ if !value.ReadASN1Integer(rl.Number) {
+ return nil, errors.New("x509: malformed crl number")
+ }
+ }
rl.Extensions = append(rl.Extensions, ext)
}
}
diff --git a/src/crypto/x509/x509.go b/src/crypto/x509/x509.go
index 87eb1f7720..950f6d08c8 100644
--- a/src/crypto/x509/x509.go
+++ b/src/crypto/x509/x509.go
@@ -2109,7 +2109,9 @@ type RevocationList struct {
// Issuer contains the DN of the issuing certificate.
Issuer pkix.Name
// AuthorityKeyId is used to identify the public key associated with the
- // issuing certificate.
+ // issuing certificate. It is populated from the authorityKeyIdentifier
+ // extension when parsing a CRL. It is ignored when creating a CRL; the
+ // extension is populated from the issuing certificate itself.
AuthorityKeyId []byte
Signature []byte
@@ -2125,7 +2127,8 @@ type RevocationList struct {
// Number is used to populate the X.509 v2 cRLNumber extension in the CRL,
// which should be a monotonically increasing sequence number for a given
- // CRL scope and CRL issuer.
+ // CRL scope and CRL issuer. It is also populated from the cRLNumber
+ // extension when parsing a CRL.
Number *big.Int
// ThisUpdate is used to populate the thisUpdate field in the CRL, which
@@ -2193,6 +2196,10 @@ func CreateRevocationList(rand io.Reader, template *RevocationList, issuer *Cert
if err != nil {
return nil, err
}
+
+ if numBytes := template.Number.Bytes(); len(numBytes) > 20 || (len(numBytes) == 20 && numBytes[0]&0x80 != 0) {
+ return nil, errors.New("x509: CRL number exceeds 20 octets")
+ }
crlNum, err := asn1.Marshal(template.Number)
if err != nil {
return nil, err
diff --git a/src/crypto/x509/x509_test.go b/src/crypto/x509/x509_test.go
index 8ef6115df4..cba44f6f8c 100644
--- a/src/crypto/x509/x509_test.go
+++ b/src/crypto/x509/x509_test.go
@@ -2479,6 +2479,40 @@ func TestCreateRevocationList(t *testing.T) {
expectedError: "x509: template contains nil Number field",
},
{
+ name: "long Number",
+ key: ec256Priv,
+ issuer: &Certificate{
+ KeyUsage: KeyUsageCRLSign,
+ Subject: pkix.Name{
+ CommonName: "testing",
+ },
+ SubjectKeyId: []byte{1, 2, 3},
+ },
+ template: &RevocationList{
+ ThisUpdate: time.Time{}.Add(time.Hour * 24),
+ NextUpdate: time.Time{}.Add(time.Hour * 48),
+ Number: big.NewInt(0).SetBytes(append([]byte{1}, make([]byte, 20)...)),
+ },
+ expectedError: "x509: CRL number exceeds 20 octets",
+ },
+ {
+ name: "long Number (20 bytes, MSB set)",
+ key: ec256Priv,
+ issuer: &Certificate{
+ KeyUsage: KeyUsageCRLSign,
+ Subject: pkix.Name{
+ CommonName: "testing",
+ },
+ SubjectKeyId: []byte{1, 2, 3},
+ },
+ template: &RevocationList{
+ ThisUpdate: time.Time{}.Add(time.Hour * 24),
+ NextUpdate: time.Time{}.Add(time.Hour * 48),
+ Number: big.NewInt(0).SetBytes(append([]byte{255}, make([]byte, 19)...)),
+ },
+ expectedError: "x509: CRL number exceeds 20 octets",
+ },
+ {
name: "invalid signature algorithm",
key: ec256Priv,
issuer: &Certificate{
@@ -2525,6 +2559,34 @@ func TestCreateRevocationList(t *testing.T) {
},
},
{
+ name: "valid, extra entry extension",
+ key: ec256Priv,
+ issuer: &Certificate{
+ KeyUsage: KeyUsageCRLSign,
+ Subject: pkix.Name{
+ CommonName: "testing",
+ },
+ SubjectKeyId: []byte{1, 2, 3},
+ },
+ template: &RevocationList{
+ RevokedCertificates: []pkix.RevokedCertificate{
+ {
+ SerialNumber: big.NewInt(2),
+ RevocationTime: time.Time{}.Add(time.Hour),
+ Extensions: []pkix.Extension{
+ {
+ Id: []int{2, 5, 29, 99},
+ Value: []byte{5, 0},
+ },
+ },
+ },
+ },
+ Number: big.NewInt(5),
+ ThisUpdate: time.Time{}.Add(time.Hour * 24),
+ NextUpdate: time.Time{}.Add(time.Hour * 48),
+ },
+ },
+ {
name: "valid, Ed25519 key",
key: ed25519Priv,
issuer: &Certificate{
@@ -2681,6 +2743,19 @@ func TestCreateRevocationList(t *testing.T) {
t.Fatalf("Extensions mismatch: got %v; want %v.",
parsedCRL.Extensions[2:], tc.template.ExtraExtensions)
}
+
+ if tc.template.Number != nil && parsedCRL.Number == nil {
+ t.Fatalf("Generated CRL missing Number: got nil, want %s",
+ tc.template.Number.String())
+ }
+ if tc.template.Number != nil && tc.template.Number.Cmp(parsedCRL.Number) != 0 {
+ t.Fatalf("Generated CRL has wrong Number: got %s, want %s",
+ parsedCRL.Number.String(), tc.template.Number.String())
+ }
+ if !bytes.Equal(parsedCRL.AuthorityKeyId, expectedAKI) {
+			t.Fatalf("Generated CRL has wrong AuthorityKeyId: got %x, want %x",
+ parsedCRL.AuthorityKeyId, expectedAKI)
+ }
})
}
}
diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go
index 6bc869fc86..8c58723c03 100644
--- a/src/database/sql/sql_test.go
+++ b/src/database/sql/sql_test.go
@@ -449,6 +449,16 @@ func TestQueryContextWait(t *testing.T) {
// TestTxContextWait tests the transaction behavior when the tx context is canceled
// during execution of the query.
func TestTxContextWait(t *testing.T) {
+ testContextWait(t, false)
+}
+
+// TestTxContextWaitNoDiscard is the same as TestTxContextWait, but should not discard
+// the final connection.
+func TestTxContextWaitNoDiscard(t *testing.T) {
+ testContextWait(t, true)
+}
+
+func testContextWait(t *testing.T, keepConnOnRollback bool) {
db := newTestDB(t, "people")
defer closeDB(t, db)
@@ -458,7 +468,7 @@ func TestTxContextWait(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- tx.keepConnOnRollback = false
+ tx.keepConnOnRollback = keepConnOnRollback
tx.dc.ci.(*fakeConn).waiter = func(c context.Context) {
cancel()
@@ -472,36 +482,11 @@ func TestTxContextWait(t *testing.T) {
t.Fatalf("expected QueryContext to error with context canceled but returned %v", err)
}
- waitForFree(t, db, 0)
-}
-
-// TestTxContextWaitNoDiscard is the same as TestTxContextWait, but should not discard
-// the final connection.
-func TestTxContextWaitNoDiscard(t *testing.T) {
- db := newTestDB(t, "people")
- defer closeDB(t, db)
-
- ctx, cancel := context.WithTimeout(context.Background(), 15*time.Millisecond)
- defer cancel()
-
- tx, err := db.BeginTx(ctx, nil)
- if err != nil {
- // Guard against the context being canceled before BeginTx completes.
- if err == context.DeadlineExceeded {
- t.Skip("tx context canceled prior to first use")
- }
- t.Fatal(err)
- }
-
- // This will trigger the *fakeConn.Prepare method which will take time
- // performing the query. The ctxDriverPrepare func will check the context
- // after this and close the rows and return an error.
- _, err = tx.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
- if err != context.DeadlineExceeded {
- t.Fatalf("expected QueryContext to error with context deadline exceeded but returned %v", err)
+ if keepConnOnRollback {
+ waitForFree(t, db, 1)
+ } else {
+ waitForFree(t, db, 0)
}
-
- waitForFree(t, db, 1)
}
// TestUnsupportedOptions checks that the database fails when a driver that
diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go
index 34f302a5cf..eea2924f1a 100644
--- a/src/encoding/gob/decode.go
+++ b/src/encoding/gob/decode.go
@@ -871,8 +871,13 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
return &op
}
+var maxIgnoreNestingDepth = 10000
+
// decIgnoreOpFor returns the decoding op for a field that has no destination.
-func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp {
+func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, depth int) *decOp {
+ if depth > maxIgnoreNestingDepth {
+ error_(errors.New("invalid nesting depth"))
+ }
// If this type is already in progress, it's a recursive type (e.g. map[string]*T).
// Return the pointer to the op we're already building.
if opPtr := inProgress[wireId]; opPtr != nil {
@@ -896,7 +901,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp)
errorf("bad data: undefined type %s", wireId.string())
case wire.ArrayT != nil:
elemId := wire.ArrayT.Elem
- elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+ elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
op = func(i *decInstr, state *decoderState, value reflect.Value) {
state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len)
}
@@ -904,15 +909,15 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp)
case wire.MapT != nil:
keyId := dec.wireType[wireId].MapT.Key
elemId := dec.wireType[wireId].MapT.Elem
- keyOp := dec.decIgnoreOpFor(keyId, inProgress)
- elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+ keyOp := dec.decIgnoreOpFor(keyId, inProgress, depth+1)
+ elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
op = func(i *decInstr, state *decoderState, value reflect.Value) {
state.dec.ignoreMap(state, *keyOp, *elemOp)
}
case wire.SliceT != nil:
elemId := wire.SliceT.Elem
- elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+ elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
op = func(i *decInstr, state *decoderState, value reflect.Value) {
state.dec.ignoreSlice(state, *elemOp)
}
@@ -1073,7 +1078,7 @@ func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *de
func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine {
engine := new(decEngine)
engine.instr = make([]decInstr, 1) // one item
- op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp))
+ op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp), 0)
ovfl := overflow(dec.typeString(remoteId))
engine.instr[0] = decInstr{*op, 0, nil, ovfl}
engine.numInstr = 1
@@ -1118,7 +1123,7 @@ func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEn
localField, present := srt.FieldByName(wireField.Name)
// TODO(r): anonymous names
if !present || !isExported(wireField.Name) {
- op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp))
+ op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp), 0)
engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl}
continue
}
diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go
index 1d5dde22a4..6fefd36756 100644
--- a/src/encoding/gob/gobencdec_test.go
+++ b/src/encoding/gob/gobencdec_test.go
@@ -12,6 +12,7 @@ import (
"fmt"
"io"
"net"
+ "reflect"
"strings"
"testing"
"time"
@@ -796,3 +797,26 @@ func TestNetIP(t *testing.T) {
t.Errorf("decoded to %v, want 1.2.3.4", ip.String())
}
}
+
+func TestIgnoreDepthLimit(t *testing.T) {
+ // We don't test the actual depth limit because it requires building an
+ // extremely large message, which takes quite a while.
+ oldNestingDepth := maxIgnoreNestingDepth
+ maxIgnoreNestingDepth = 100
+ defer func() { maxIgnoreNestingDepth = oldNestingDepth }()
+ b := new(bytes.Buffer)
+ enc := NewEncoder(b)
+ typ := reflect.TypeOf(int(0))
+ nested := reflect.ArrayOf(1, typ)
+ for i := 0; i < 100; i++ {
+ nested = reflect.ArrayOf(1, nested)
+ }
+ badStruct := reflect.New(reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}}))
+ enc.Encode(badStruct.Interface())
+ dec := NewDecoder(b)
+ var output struct{ Hello int }
+ expectedErr := "invalid nesting depth"
+ if err := dec.Decode(&output); err == nil || err.Error() != expectedErr {
+ t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err)
+ }
+}
diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go
index 257591262f..a6fb665458 100644
--- a/src/encoding/xml/read.go
+++ b/src/encoding/xml/read.go
@@ -152,7 +152,7 @@ func (d *Decoder) DecodeElement(v any, start *StartElement) error {
if val.IsNil() {
return errors.New("nil pointer passed to Unmarshal")
}
- return d.unmarshal(val.Elem(), start)
+ return d.unmarshal(val.Elem(), start, 0)
}
// An UnmarshalError represents an error in the unmarshaling process.
@@ -308,8 +308,15 @@ var (
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)
+const maxUnmarshalDepth = 10000
+
+var errExeceededMaxUnmarshalDepth = errors.New("exceeded max depth")
+
// Unmarshal a single XML element into val.
-func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
+func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) error {
+ if depth >= maxUnmarshalDepth {
+ return errExeceededMaxUnmarshalDepth
+ }
// Find start element if we need it.
if start == nil {
for {
@@ -402,7 +409,7 @@ func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
v.Set(reflect.Append(val, reflect.Zero(v.Type().Elem())))
// Recur to read element into slice.
- if err := d.unmarshal(v.Index(n), start); err != nil {
+ if err := d.unmarshal(v.Index(n), start, depth+1); err != nil {
v.SetLen(n)
return err
}
@@ -525,13 +532,15 @@ Loop:
case StartElement:
consumed := false
if sv.IsValid() {
- consumed, err = d.unmarshalPath(tinfo, sv, nil, &t)
+ // unmarshalPath can call unmarshal, so we need to pass the depth through so that
+			// we can continue to enforce the maximum recursion limit.
+ consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth)
if err != nil {
return err
}
if !consumed && saveAny.IsValid() {
consumed = true
- if err := d.unmarshal(saveAny, &t); err != nil {
+ if err := d.unmarshal(saveAny, &t, depth+1); err != nil {
return err
}
}
@@ -676,7 +685,7 @@ func copyValue(dst reflect.Value, src []byte) (err error) {
// The consumed result tells whether XML elements have been consumed
// from the Decoder until start's matching end element, or if it's
// still untouched because start is uninteresting for sv's fields.
-func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
+func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement, depth int) (consumed bool, err error) {
recurse := false
Loop:
for i := range tinfo.fields {
@@ -691,7 +700,7 @@ Loop:
}
if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
// It's a perfect match, unmarshal the field.
- return true, d.unmarshal(finfo.value(sv, initNilPointers), start)
+ return true, d.unmarshal(finfo.value(sv, initNilPointers), start, depth+1)
}
if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
// It's a prefix for the field. Break and recurse
@@ -720,7 +729,9 @@ Loop:
}
switch t := tok.(type) {
case StartElement:
- consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t)
+ // the recursion depth of unmarshalPath is limited to the path length specified
+ // by the struct field tag, so we don't increment the depth here.
+ consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t, depth)
if err != nil {
return true, err
}
@@ -736,12 +747,12 @@ Loop:
}
// Skip reads tokens until it has consumed the end element
-// matching the most recent start element already consumed.
-// It recurs if it encounters a start element, so it can be used to
-// skip nested structures.
+// matching the most recent start element already consumed,
+// skipping nested structures.
// It returns nil if it finds an end element matching the start
// element; otherwise it returns an error describing the problem.
func (d *Decoder) Skip() error {
+ var depth int64
for {
tok, err := d.Token()
if err != nil {
@@ -749,11 +760,12 @@ func (d *Decoder) Skip() error {
}
switch tok.(type) {
case StartElement:
- if err := d.Skip(); err != nil {
- return err
- }
+ depth++
case EndElement:
- return nil
+ if depth == 0 {
+ return nil
+ }
+ depth--
}
}
}
diff --git a/src/encoding/xml/read_test.go b/src/encoding/xml/read_test.go
index 6ef55de77b..35385c6490 100644
--- a/src/encoding/xml/read_test.go
+++ b/src/encoding/xml/read_test.go
@@ -5,6 +5,8 @@
package xml
import (
+ "bytes"
+ "errors"
"io"
"reflect"
"strings"
@@ -1094,3 +1096,32 @@ func TestUnmarshalIntoNil(t *testing.T) {
}
}
+
+func TestCVE202228131(t *testing.T) {
+ type nested struct {
+ Parent *nested `xml:",any"`
+ }
+ var n nested
+ err := Unmarshal(bytes.Repeat([]byte("<a>"), maxUnmarshalDepth+1), &n)
+ if err == nil {
+ t.Fatal("Unmarshal did not fail")
+ } else if !errors.Is(err, errExeceededMaxUnmarshalDepth) {
+ t.Fatalf("Unmarshal unexpected error: got %q, want %q", err, errExeceededMaxUnmarshalDepth)
+ }
+}
+
+func TestCVE202230633(t *testing.T) {
+ if testing.Short() {
+ t.Skip("test requires significant memory")
+ }
+ defer func() {
+ p := recover()
+ if p != nil {
+ t.Fatal("Unmarshal panicked")
+ }
+ }()
+ var example struct {
+ Things []string
+ }
+ Unmarshal(bytes.Repeat([]byte("<a>"), 17_000_000), &example)
+}
diff --git a/src/flag/flag.go b/src/flag/flag.go
index a0762441a5..9abf8d769e 100644
--- a/src/flag/flag.go
+++ b/src/flag/flag.go
@@ -49,10 +49,11 @@ The arguments are indexed from 0 through flag.NArg()-1.
The following forms are permitted:
-flag
+ --flag // double dashes are also permitted
-flag=x
-flag x // non-boolean flags only
-One or two minus signs may be used; they are equivalent.
+One or two dashes may be used; they are equivalent.
The last form is not permitted for boolean flags because the
meaning of the command
diff --git a/src/go/build/build.go b/src/go/build/build.go
index bfe3f444ca..dfb37b8f34 100644
--- a/src/go/build/build.go
+++ b/src/go/build/build.go
@@ -715,6 +715,9 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa
tried.goroot = dir
}
if ctxt.Compiler == "gccgo" && goroot.IsStandardPackage(ctxt.GOROOT, ctxt.Compiler, path) {
+ // TODO(bcmills): Setting p.Dir here is misleading, because gccgo
+ // doesn't actually load its standard-library packages from this
+ // directory. See if we can leave it unset.
p.Dir = ctxt.joinPath(ctxt.GOROOT, "src", path)
p.Goroot = true
p.Root = ctxt.GOROOT
diff --git a/src/go/build/constraint/expr.go b/src/go/build/constraint/expr.go
index d64eead654..505cbffa4c 100644
--- a/src/go/build/constraint/expr.go
+++ b/src/go/build/constraint/expr.go
@@ -5,9 +5,7 @@
// Package constraint implements parsing and evaluation of build constraint lines.
// See https://golang.org/cmd/go/#hdr-Build_constraints for documentation about build constraints themselves.
//
-// This package parses both the original “// +build” syntax and the “//go:build” syntax that will be added in Go 1.17.
-// The parser is being included in Go 1.16 to allow tools that need to process Go 1.17 source code
-// to still be built against the Go 1.16 release.
+// This package parses both the original “// +build” syntax and the “//go:build” syntax that was added in Go 1.17.
// See https://golang.org/design/draft-gobuild for details about the “//go:build” syntax.
package constraint
diff --git a/src/go/build/doc.go b/src/go/build/doc.go
index 262f6709af..cd1d3fd33e 100644
--- a/src/go/build/doc.go
+++ b/src/go/build/doc.go
@@ -57,12 +57,13 @@
//
// # Build Constraints
//
-// A build constraint, also known as a build tag, is a line comment that begins
+// A build constraint, also known as a build tag, is a condition under which a
+// file should be included in the package. Build constraints are given by a
+// line comment that begins
//
// //go:build
//
-// that lists the conditions under which a file should be included in the
-// package. Build constraints may also be part of a file's name
+// Build constraints may also be part of a file's name
// (for example, source_windows.go will only be included if the target
// operating system is windows).
//
diff --git a/src/go/doc/comment/parse.go b/src/go/doc/comment/parse.go
index 4de8ce710d..e8d844c491 100644
--- a/src/go/doc/comment/parse.go
+++ b/src/go/doc/comment/parse.go
@@ -326,6 +326,13 @@ func (p *Parser) Parse(text string) *Doc {
switch b := b.(type) {
case *Paragraph:
b.Text = d.parseLinkedText(string(b.Text[0].(Plain)))
+ case *List:
+ for _, i := range b.Items {
+ for _, c := range i.Content {
+ p := c.(*Paragraph)
+ p.Text = d.parseLinkedText(string(p.Text[0].(Plain)))
+ }
+ }
}
}
diff --git a/src/go/doc/comment/testdata/linklist.txt b/src/go/doc/comment/testdata/linklist.txt
new file mode 100644
index 0000000000..baf40624b3
--- /dev/null
+++ b/src/go/doc/comment/testdata/linklist.txt
@@ -0,0 +1,18 @@
+{"DocLinkBaseURL": "https://pkg.go.dev"}
+-- input --
+Did you know?
+
+ - [encoding/json.Marshal] is a doc link. So is [encoding/json.Unmarshal].
+-- text --
+Did you know?
+
+ - encoding/json.Marshal is a doc link. So is encoding/json.Unmarshal.
+-- markdown --
+Did you know?
+
+ - [encoding/json.Marshal](https://pkg.go.dev/encoding/json#Marshal) is a doc link. So is [encoding/json.Unmarshal](https://pkg.go.dev/encoding/json#Unmarshal).
+-- html --
+<p>Did you know?
+<ul>
+<li><a href="https://pkg.go.dev/encoding/json#Marshal">encoding/json.Marshal</a> is a doc link. So is <a href="https://pkg.go.dev/encoding/json#Unmarshal">encoding/json.Unmarshal</a>.
+</ul>
diff --git a/src/go/doc/comment/testdata/linklist2.txt b/src/go/doc/comment/testdata/linklist2.txt
new file mode 100644
index 0000000000..81b306100f
--- /dev/null
+++ b/src/go/doc/comment/testdata/linklist2.txt
@@ -0,0 +1,39 @@
+{"DocLinkBaseURL": "https://pkg.go.dev"}
+-- input --
+Did you know?
+
+ - [testing.T] is one doc link.
+ - So is [testing.M].
+ - So is [testing.B].
+ This is the same list paragraph.
+
+ There is [testing.PB] in this list item, too!
+-- text --
+Did you know?
+
+ - testing.T is one doc link.
+
+ - So is testing.M.
+
+ - So is testing.B. This is the same list paragraph.
+
+ There is testing.PB in this list item, too!
+-- markdown --
+Did you know?
+
+ - [testing.T](https://pkg.go.dev/testing#T) is one doc link.
+
+ - So is [testing.M](https://pkg.go.dev/testing#M).
+
+ - So is [testing.B](https://pkg.go.dev/testing#B). This is the same list paragraph.
+
+ There is [testing.PB](https://pkg.go.dev/testing#PB) in this list item, too!
+-- html --
+<p>Did you know?
+<ul>
+<li><p><a href="https://pkg.go.dev/testing#T">testing.T</a> is one doc link.
+<li><p>So is <a href="https://pkg.go.dev/testing#M">testing.M</a>.
+<li><p>So is <a href="https://pkg.go.dev/testing#B">testing.B</a>.
+This is the same list paragraph.
+<p>There is <a href="https://pkg.go.dev/testing#PB">testing.PB</a> in this list item, too!
+</ul>
diff --git a/src/go/doc/comment/testdata/linklist3.txt b/src/go/doc/comment/testdata/linklist3.txt
new file mode 100644
index 0000000000..701a54ecff
--- /dev/null
+++ b/src/go/doc/comment/testdata/linklist3.txt
@@ -0,0 +1,31 @@
+{"DocLinkBaseURL": "https://pkg.go.dev"}
+-- input --
+Cool things:
+
+ - Foo
+ - [Go]
+ - Bar
+
+[Go]: https://go.dev/
+-- text --
+Cool things:
+
+ - Foo
+ - Go
+ - Bar
+
+[Go]: https://go.dev/
+-- markdown --
+Cool things:
+
+ - Foo
+ - [Go](https://go.dev/)
+ - Bar
+
+-- html --
+<p>Cool things:
+<ul>
+<li>Foo
+<li><a href="https://go.dev/">Go</a>
+<li>Bar
+</ul>
diff --git a/src/go/doc/comment/testdata/linklist4.txt b/src/go/doc/comment/testdata/linklist4.txt
new file mode 100644
index 0000000000..db39ec4ee1
--- /dev/null
+++ b/src/go/doc/comment/testdata/linklist4.txt
@@ -0,0 +1,36 @@
+{"DocLinkBaseURL": "https://pkg.go.dev"}
+-- input --
+Cool things:
+
+ - Foo
+ - [Go] is great
+
+ [Go]: https://go.dev/
+ - Bar
+
+-- text --
+Cool things:
+
+ - Foo
+
+ - Go is great
+
+ - Bar
+
+[Go]: https://go.dev/
+-- markdown --
+Cool things:
+
+ - Foo
+
+ - [Go](https://go.dev/) is great
+
+ - Bar
+
+-- html --
+<p>Cool things:
+<ul>
+<li><p>Foo
+<li><p><a href="https://go.dev/">Go</a> is great
+<li><p>Bar
+</ul>
diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go
index e3468f481f..d911c8e1d0 100644
--- a/src/go/parser/interface.go
+++ b/src/go/parser/interface.go
@@ -94,8 +94,11 @@ func ParseFile(fset *token.FileSet, filename string, src any, mode Mode) (f *ast
defer func() {
if e := recover(); e != nil {
// resume same panic if it's not a bailout
- if _, ok := e.(bailout); !ok {
+ bail, ok := e.(bailout)
+ if !ok {
panic(e)
+ } else if bail.msg != "" {
+ p.errors.Add(p.file.Position(bail.pos), bail.msg)
}
}
@@ -198,8 +201,11 @@ func ParseExprFrom(fset *token.FileSet, filename string, src any, mode Mode) (ex
defer func() {
if e := recover(); e != nil {
// resume same panic if it's not a bailout
- if _, ok := e.(bailout); !ok {
+ bail, ok := e.(bailout)
+ if !ok {
panic(e)
+ } else if bail.msg != "" {
+ p.errors.Add(p.file.Position(bail.pos), bail.msg)
}
}
p.errors.Sort()
diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
index ca2f24c8b8..d4ad36dc67 100644
--- a/src/go/parser/parser.go
+++ b/src/go/parser/parser.go
@@ -59,6 +59,10 @@ type parser struct {
inRhs bool // if set, the parser is parsing a rhs expression
imports []*ast.ImportSpec // list of imports
+
+ // nestLev is used to track and limit the recursion depth
+ // during parsing.
+ nestLev int
}
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
@@ -108,6 +112,24 @@ func un(p *parser) {
p.printTrace(")")
}
+// maxNestLev is the deepest we're willing to recurse during parsing
+const maxNestLev int = 1e5
+
+func incNestLev(p *parser) *parser {
+ p.nestLev++
+ if p.nestLev > maxNestLev {
+ p.error(p.pos, "exceeded max nesting depth")
+ panic(bailout{})
+ }
+ return p
+}
+
+// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
+// It is used along with incNestLev in a similar fashion to how un and trace are used.
+func decNestLev(p *parser) {
+ p.nestLev--
+}
+
// Advance to the next token.
func (p *parser) next0() {
// Because of one-token look-ahead, print the previous token
@@ -218,8 +240,12 @@ func (p *parser) next() {
}
}
-// A bailout panic is raised to indicate early termination.
-type bailout struct{}
+// A bailout panic is raised to indicate early termination. pos and msg are
+// only populated when bailing out of object resolution.
+type bailout struct {
+ pos token.Pos
+ msg string
+}
func (p *parser) error(pos token.Pos, msg string) {
if p.trace {
@@ -1247,6 +1273,8 @@ func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
}
func (p *parser) tryIdentOrType() ast.Expr {
+ defer decNestLev(incNestLev(p))
+
switch p.tok {
case token.IDENT:
typ := p.parseTypeName(nil)
@@ -1657,7 +1685,13 @@ func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
if x == nil {
x = p.parseOperand()
}
- for {
+ // We track the nesting here rather than at the entry for the function,
+ // since it can iteratively produce a nested output, and we want to
+ // limit how deep a structure we generate.
+ var n int
+ defer func() { p.nestLev -= n }()
+ for n = 1; ; n++ {
+ incNestLev(p)
switch p.tok {
case token.PERIOD:
p.next()
@@ -1717,6 +1751,8 @@ func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
}
func (p *parser) parseUnaryExpr() ast.Expr {
+ defer decNestLev(incNestLev(p))
+
if p.trace {
defer un(trace(p, "UnaryExpr"))
}
@@ -1806,7 +1842,13 @@ func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int, check bool) ast.Expr {
if x == nil {
x = p.parseUnaryExpr()
}
- for {
+ // We track the nesting here rather than at the entry for the function,
+ // since it can iteratively produce a nested output, and we want to
+ // limit how deep a structure we generate.
+ var n int
+ defer func() { p.nestLev -= n }()
+ for n = 1; ; n++ {
+ incNestLev(p)
op, oprec := p.tokPrec()
if oprec < prec1 {
return x
@@ -2099,6 +2141,8 @@ func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
}
func (p *parser) parseIfStmt() *ast.IfStmt {
+ defer decNestLev(incNestLev(p))
+
if p.trace {
defer un(trace(p, "IfStmt"))
}
@@ -2402,6 +2446,8 @@ func (p *parser) parseForStmt() ast.Stmt {
}
func (p *parser) parseStmt() (s ast.Stmt) {
+ defer decNestLev(incNestLev(p))
+
if p.trace {
defer un(trace(p, "Statement"))
}
diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go
index a4f882d368..0c278924c9 100644
--- a/src/go/parser/parser_test.go
+++ b/src/go/parser/parser_test.go
@@ -577,3 +577,168 @@ type x int // comment
t.Errorf("got %q, want %q", comment, "// comment")
}
}
+
+var parseDepthTests = []struct {
+ name string
+ format string
+ // multiplier is used when a single statement may result in more than one
+ // change in the depth level, for instance "1+(..." produces a BinaryExpr
+ // followed by a UnaryExpr, which increments the depth twice. The test
+ // case comment explains which nodes are triggering the multiple depth
+ // changes.
+ parseMultiplier int
+ // scope is true if we should also test the statement for the resolver scope
+ // depth limit.
+ scope bool
+ // scopeMultiplier does the same as parseMultiplier, but for the scope
+ // depths.
+ scopeMultiplier int
+}{
+ // The format expands the part inside « » many times.
+ // A second set of brackets nested inside the first stops the repetition,
+ // so that for example «(«1»)» expands to (((...((((1))))...))).
+ {name: "array", format: "package main; var x «[1]»int"},
+ {name: "slice", format: "package main; var x «[]»int"},
+ {name: "struct", format: "package main; var x «struct { X «int» }»", scope: true},
+ {name: "pointer", format: "package main; var x «*»int"},
+ {name: "func", format: "package main; var x «func()»int", scope: true},
+ {name: "chan", format: "package main; var x «chan »int"},
+ {name: "chan2", format: "package main; var x «<-chan »int"},
+ {name: "interface", format: "package main; var x «interface { M() «int» }»", scope: true, scopeMultiplier: 2}, // Scopes: InterfaceType, FuncType
+ {name: "map", format: "package main; var x «map[int]»int"},
+ {name: "slicelit", format: "package main; var x = «[]any{«»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit
+ {name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit
+ {name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit
+ {name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 2}, // Parser nodes: CompositeLit, KeyValueExpr
+ {name: "dot", format: "package main; var x = «x.»x"},
+ {name: "index", format: "package main; var x = x«[1]»"},
+ {name: "slice", format: "package main; var x = x«[1:2]»"},
+ {name: "slice3", format: "package main; var x = x«[1:2:3]»"},
+ {name: "dottype", format: "package main; var x = x«.(any)»"},
+ {name: "callseq", format: "package main; var x = x«()»"},
+ {name: "methseq", format: "package main; var x = x«.m()»", parseMultiplier: 2}, // Parser nodes: SelectorExpr, CallExpr
+ {name: "binary", format: "package main; var x = «1+»1"},
+ {name: "binaryparen", format: "package main; var x = «1+(«1»)»", parseMultiplier: 2}, // Parser nodes: BinaryExpr, ParenExpr
+ {name: "unary", format: "package main; var x = «^»1"},
+ {name: "addr", format: "package main; var x = «& »x"},
+ {name: "star", format: "package main; var x = «*»x"},
+ {name: "recv", format: "package main; var x = «<-»x"},
+ {name: "call", format: "package main; var x = «f(«1»)»", parseMultiplier: 2}, // Parser nodes: Ident, CallExpr
+ {name: "conv", format: "package main; var x = «(*T)(«1»)»", parseMultiplier: 2}, // Parser nodes: ParenExpr, CallExpr
+ {name: "label", format: "package main; func main() { «Label:» }"},
+ {name: "if", format: "package main; func main() { «if true { «» }»}", parseMultiplier: 2, scope: true, scopeMultiplier: 2}, // Parser nodes: IfStmt, BlockStmt. Scopes: IfStmt, BlockStmt
+ {name: "ifelse", format: "package main; func main() { «if true {} else » {} }", scope: true},
+ {name: "switch", format: "package main; func main() { «switch { default: «» }»}", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause
+ {name: "typeswitch", format: "package main; func main() { «switch x.(type) { default: «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause
+ {name: "for0", format: "package main; func main() { «for { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt
+ {name: "for1", format: "package main; func main() { «for x { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt
+ {name: "for3", format: "package main; func main() { «for f(); g(); h() { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt
+ {name: "forrange0", format: "package main; func main() { «for range x { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt
+ {name: "forrange1", format: "package main; func main() { «for x = range z { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt
+ {name: "forrange2", format: "package main; func main() { «for x, y = range z { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt
+ {name: "go", format: "package main; func main() { «go func() { «» }()» }", parseMultiplier: 2, scope: true}, // Parser nodes: GoStmt, FuncLit
+ {name: "defer", format: "package main; func main() { «defer func() { «» }()» }", parseMultiplier: 2, scope: true}, // Parser nodes: DeferStmt, FuncLit
+ {name: "select", format: "package main; func main() { «select { default: «» }» }", scope: true},
+}
+
+// split splits pre«mid»post into pre, mid, post.
+// If the string does not have that form, split returns x, "", "".
+func split(x string) (pre, mid, post string) {
+ start, end := strings.Index(x, "«"), strings.LastIndex(x, "»")
+ if start < 0 || end < 0 {
+ return x, "", ""
+ }
+ return x[:start], x[start+len("«") : end], x[end+len("»"):]
+}
+
+func TestParseDepthLimit(t *testing.T) {
+ if testing.Short() {
+ t.Skip("test requires significant memory")
+ }
+ for _, tt := range parseDepthTests {
+ for _, size := range []string{"small", "big"} {
+ t.Run(tt.name+"/"+size, func(t *testing.T) {
+ n := maxNestLev + 1
+ if tt.parseMultiplier > 0 {
+ n /= tt.parseMultiplier
+ }
+ if size == "small" {
+ // Decrease the number of statements by 10, in order to check
+ // that we do not fail when under the limit. 10 is used to
+ // provide some wiggle room for cases where the surrounding
+ // scaffolding syntax adds some noise to the depth that changes
+ // on a per testcase basis.
+ n -= 10
+ }
+
+ pre, mid, post := split(tt.format)
+ if strings.Contains(mid, "«") {
+ left, base, right := split(mid)
+ mid = strings.Repeat(left, n) + base + strings.Repeat(right, n)
+ } else {
+ mid = strings.Repeat(mid, n)
+ }
+ input := pre + mid + post
+
+ fset := token.NewFileSet()
+ _, err := ParseFile(fset, "", input, ParseComments|SkipObjectResolution)
+ if size == "small" {
+ if err != nil {
+ t.Errorf("ParseFile(...): %v (want success)", err)
+ }
+ } else {
+ expected := "exceeded max nesting depth"
+ if err == nil || !strings.HasSuffix(err.Error(), expected) {
+ t.Errorf("ParseFile(...) = _, %v, want %q", err, expected)
+ }
+ }
+ })
+ }
+ }
+}
+
+func TestScopeDepthLimit(t *testing.T) {
+ for _, tt := range parseDepthTests {
+ if !tt.scope {
+ continue
+ }
+ for _, size := range []string{"small", "big"} {
+ t.Run(tt.name+"/"+size, func(t *testing.T) {
+ n := maxScopeDepth + 1
+ if tt.scopeMultiplier > 0 {
+ n /= tt.scopeMultiplier
+ }
+ if size == "small" {
+ // Decrease the number of statements by 10, in order to check
+ // that we do not fail when under the limit. 10 is used to
+ // provide some wiggle room for cases where the surrounding
+ // scaffolding syntax adds some noise to the depth that changes
+ // on a per testcase basis.
+ n -= 10
+ }
+
+ pre, mid, post := split(tt.format)
+ if strings.Contains(mid, "«") {
+ left, base, right := split(mid)
+ mid = strings.Repeat(left, n) + base + strings.Repeat(right, n)
+ } else {
+ mid = strings.Repeat(mid, n)
+ }
+ input := pre + mid + post
+
+ fset := token.NewFileSet()
+ _, err := ParseFile(fset, "", input, DeclarationErrors)
+ if size == "small" {
+ if err != nil {
+ t.Errorf("ParseFile(...): %v (want success)", err)
+ }
+ } else {
+ expected := "exceeded max scope depth during object resolution"
+ if err == nil || !strings.HasSuffix(err.Error(), expected) {
+ t.Errorf("ParseFile(...) = _, %v, want %q", err, expected)
+ }
+ }
+ })
+ }
+ }
+}
diff --git a/src/go/parser/resolver.go b/src/go/parser/resolver.go
index 767a5e20ad..f8ff618eba 100644
--- a/src/go/parser/resolver.go
+++ b/src/go/parser/resolver.go
@@ -54,6 +54,8 @@ func resolveFile(file *ast.File, handle *token.File, declErr func(token.Pos, str
file.Unresolved = r.unresolved[0:i]
}
+const maxScopeDepth int = 1e3
+
type resolver struct {
handle *token.File
declErr func(token.Pos, string)
@@ -85,16 +87,19 @@ func (r *resolver) sprintf(format string, args ...any) string {
}
func (r *resolver) openScope(pos token.Pos) {
+ r.depth++
+ if r.depth > maxScopeDepth {
+ panic(bailout{pos: pos, msg: "exceeded max scope depth during object resolution"})
+ }
if debugResolve {
r.trace("opening scope @%v", pos)
- r.depth++
}
r.topScope = ast.NewScope(r.topScope)
}
func (r *resolver) closeScope() {
+ r.depth--
if debugResolve {
- r.depth--
r.trace("closing scope")
}
r.topScope = r.topScope.Outer
diff --git a/src/go/types/api.go b/src/go/types/api.go
index 0915d6a6ee..5e7be29b3c 100644
--- a/src/go/types/api.go
+++ b/src/go/types/api.go
@@ -413,7 +413,8 @@ func (conf *Config) Check(path string, fset *token.FileSet, files []*ast.File, i
// AssertableTo reports whether a value of type V can be asserted to have type T.
//
-// The behavior of AssertableTo is undefined in two cases:
+// The behavior of AssertableTo is unspecified in three cases:
+// - if T is Typ[Invalid]
// - if V is a generalized interface; i.e., an interface that may only be used
// as a type constraint in Go code
// - if T is an uninstantiated generic type
@@ -429,8 +430,8 @@ func AssertableTo(V *Interface, T Type) bool {
// AssignableTo reports whether a value of type V is assignable to a variable
// of type T.
//
-// The behavior of AssignableTo is undefined if V or T is an uninstantiated
-// generic type.
+// The behavior of AssignableTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
func AssignableTo(V, T Type) bool {
x := operand{mode: value, typ: V}
ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x
@@ -440,8 +441,8 @@ func AssignableTo(V, T Type) bool {
// ConvertibleTo reports whether a value of type V is convertible to a value of
// type T.
//
-// The behavior of ConvertibleTo is undefined if V or T is an uninstantiated
-// generic type.
+// The behavior of ConvertibleTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
func ConvertibleTo(V, T Type) bool {
x := operand{mode: value, typ: V}
return x.convertibleTo(nil, T, nil) // check not needed for non-constant x
@@ -449,8 +450,8 @@ func ConvertibleTo(V, T Type) bool {
// Implements reports whether type V implements interface T.
//
-// The behavior of Implements is undefined if V is an uninstantiated generic
-// type.
+// The behavior of Implements is unspecified if V is Typ[Invalid] or an uninstantiated
+// generic type.
func Implements(V Type, T *Interface) bool {
if T.Empty() {
// All types (even Typ[Invalid]) implement the empty interface.
diff --git a/src/go/types/sizes.go b/src/go/types/sizes.go
index 7b67dca2b8..cb5253b453 100644
--- a/src/go/types/sizes.go
+++ b/src/go/types/sizes.go
@@ -53,6 +53,17 @@ func (s *StdSizes) Alignof(T Type) int64 {
// is the same as unsafe.Alignof(x[0]), but at least 1."
return s.Alignof(t.elem)
case *Struct:
+ if len(t.fields) == 0 && isSyncAtomicAlign64(T) {
+ // Special case: sync/atomic.align64 is an
+ // empty struct we recognize as a signal that
+ // the struct it contains must be
+ // 64-bit-aligned.
+ //
+ // This logic is equivalent to the logic in
+ // cmd/compile/internal/types/size.go:calcStructOffset
+ return 8
+ }
+
// spec: "For a variable x of struct type: unsafe.Alignof(x)
// is the largest of the values unsafe.Alignof(x.f) for each
// field f of x, but at least 1."
@@ -93,6 +104,18 @@ func (s *StdSizes) Alignof(T Type) int64 {
return a
}
+func isSyncAtomicAlign64(T Type) bool {
+ named, ok := T.(*Named)
+ if !ok {
+ return false
+ }
+ obj := named.Obj()
+ return obj.Name() == "align64" &&
+ obj.Pkg() != nil &&
+ (obj.Pkg().Path() == "sync/atomic" ||
+ obj.Pkg().Path() == "runtime/internal/atomic")
+}
+
func (s *StdSizes) Offsetsof(fields []*Var) []int64 {
offsets := make([]int64, len(fields))
var o int64
diff --git a/src/go/types/sizes_test.go b/src/go/types/sizes_test.go
index 539b4e37c1..740072f1dc 100644
--- a/src/go/types/sizes_test.go
+++ b/src/go/types/sizes_test.go
@@ -17,13 +17,16 @@ import (
// findStructType typechecks src and returns the first struct type encountered.
func findStructType(t *testing.T, src string) *types.Struct {
+ return findStructTypeConfig(t, src, &types.Config{})
+}
+
+func findStructTypeConfig(t *testing.T, src string, conf *types.Config) *types.Struct {
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, "x.go", src, 0)
if err != nil {
t.Fatal(err)
}
info := types.Info{Types: make(map[ast.Expr]types.TypeAndValue)}
- var conf types.Config
_, err = conf.Check("x", fset, []*ast.File{f}, &info)
if err != nil {
t.Fatal(err)
@@ -110,3 +113,39 @@ const _ = unsafe.Offsetof(struct{ x int64 }{}.x)
_ = conf.Sizes.Alignof(tv.Type)
}
}
+
+// Issue #53884.
+func TestAtomicAlign(t *testing.T) {
+ const src = `
+package main
+
+import "sync/atomic"
+
+var s struct {
+ x int32
+ y atomic.Int64
+ z int64
+}
+`
+
+ want := []int64{0, 8, 16}
+ for _, arch := range []string{"386", "amd64"} {
+ t.Run(arch, func(t *testing.T) {
+ conf := types.Config{
+ Importer: importer.Default(),
+ Sizes: types.SizesFor("gc", arch),
+ }
+ ts := findStructTypeConfig(t, src, &conf)
+ var fields []*types.Var
+ // Make a copy manually :(
+ for i := 0; i < ts.NumFields(); i++ {
+ fields = append(fields, ts.Field(i))
+ }
+
+ offsets := conf.Sizes.Offsetsof(fields)
+ if offsets[0] != want[0] || offsets[1] != want[1] || offsets[2] != want[2] {
+ t.Errorf("OffsetsOf(%v) = %v want %v", ts, offsets, want)
+ }
+ })
+ }
+}
diff --git a/src/go/types/testdata/fixedbugs/issue39634.go b/src/go/types/testdata/fixedbugs/issue39634.go
index 8cba2e735a..ce84299a61 100644
--- a/src/go/types/testdata/fixedbugs/issue39634.go
+++ b/src/go/types/testdata/fixedbugs/issue39634.go
@@ -31,10 +31,8 @@ type x7[A any] struct{ foo7 }
func main7() { var _ foo7 = x7[int]{} }
// crash 8
-// Embedding stand-alone type parameters is not permitted for now. Disabled.
-// type foo8[A any] interface { ~A }
-// func bar8[A foo8[A]](a A) {}
-// func main8() {}
+type foo8[A any] interface { ~A /* ERROR cannot be a type parameter */ }
+func bar8[A foo8[A]](a A) {}
// crash 9
type foo9[A any] interface { foo9 /* ERROR illegal cycle */ [A] }
@@ -74,10 +72,9 @@ func F20[t Z20]() { F20(t /* ERROR invalid composite literal type */ {}) }
type Z21 /* ERROR illegal cycle */ interface{ Z21 }
func F21[T Z21]() { ( /* ERROR not used */ F21[Z21]) }
-// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
-// // crash 24
-// type T24[P any] P
-// func (r T24[P]) m() { T24 /* ERROR without instantiation */ .m() }
+// crash 24
+type T24[P any] P // ERROR cannot use a type parameter as RHS in type declaration
+func (r T24[P]) m() { T24 /* ERROR without instantiation */ .m() }
// crash 25
type T25[A any] int
diff --git a/src/image/jpeg/reader_test.go b/src/image/jpeg/reader_test.go
index bf07fadede..6fdb6c4449 100644
--- a/src/image/jpeg/reader_test.go
+++ b/src/image/jpeg/reader_test.go
@@ -13,6 +13,7 @@ import (
"io"
"math/rand"
"os"
+ "runtime/debug"
"strings"
"testing"
"time"
@@ -247,18 +248,16 @@ func TestLargeImageWithShortData(t *testing.T) {
"\x20\x36\x9f\x78\x64\x75\xe6\xab\x7d\xb2\xde\x29\x70\xd3\x20\x27" +
"\xde\xaf\xa4\xf0\xca\x9f\x24\xa8\xdf\x46\xa8\x24\x84\x96\xe3\x77" +
"\xf9\x2e\xe0\x0a\x62\x7f\xdf\xd9"
- c := make(chan error, 1)
- go func() {
- _, err := Decode(strings.NewReader(input))
- c <- err
- }()
- select {
- case err := <-c:
- if err == nil {
- t.Fatalf("got nil error, want non-nil")
- }
- case <-time.After(3 * time.Second):
- t.Fatalf("timed out")
+
+ timer := time.AfterFunc(30*time.Second, func() {
+ debug.SetTraceback("all")
+ panic("TestLargeImageWithShortData stuck in Decode")
+ })
+ defer timer.Stop()
+
+ _, err := Decode(strings.NewReader(input))
+ if err == nil {
+ t.Fatalf("got nil error, want non-nil")
}
}
diff --git a/src/internal/trace/goroutines.go b/src/internal/trace/goroutines.go
index a5fda489be..5da90e0b6d 100644
--- a/src/internal/trace/goroutines.go
+++ b/src/internal/trace/goroutines.go
@@ -4,7 +4,10 @@
package trace
-import "sort"
+import (
+ "sort"
+ "strings"
+)
// GDesc contains statistics and execution details of a single goroutine.
type GDesc struct {
@@ -126,10 +129,17 @@ func (g *GDesc) finalize(lastTs, activeGCStartTime int64, trigger *Event) {
finalStat := g.snapshotStat(lastTs, activeGCStartTime)
g.GExecutionStat = finalStat
- for _, s := range g.activeRegions {
- s.End = trigger
- s.GExecutionStat = finalStat.sub(s.GExecutionStat)
- g.Regions = append(g.Regions, s)
+
+ // System goroutines are never part of regions, even though they
+ // "inherit" a task due to creation (EvGoCreate) from within a region.
+ // This may happen e.g. if the first GC is triggered within a region,
+ // starting the GC worker goroutines.
+ if !IsSystemGoroutine(g.Name) {
+ for _, s := range g.activeRegions {
+ s.End = trigger
+ s.GExecutionStat = finalStat.sub(s.GExecutionStat)
+ g.Regions = append(g.Regions, s)
+ }
}
*(g.gdesc) = gdesc{}
}
@@ -158,10 +168,13 @@ func GoroutineStats(events []*Event) map[uint64]*GDesc {
case EvGoCreate:
g := &GDesc{ID: ev.Args[0], CreationTime: ev.Ts, gdesc: new(gdesc)}
g.blockSchedTime = ev.Ts
- // When a goroutine is newly created, inherit the
- // task of the active region. For ease handling of
- // this case, we create a fake region description with
- // the task id.
+ // When a goroutine is newly created, inherit the task
+ // of the active region. For ease handling of this
+ // case, we create a fake region description with the
+ // task id. This isn't strictly necessary as this
+ // goroutine may not be associated with the task, but
+ // it can be convenient to see all children created
+ // during a region.
if creatorG := gs[ev.G]; creatorG != nil && len(creatorG.gdesc.activeRegions) > 0 {
regions := creatorG.gdesc.activeRegions
s := regions[len(regions)-1]
@@ -336,3 +349,9 @@ func RelatedGoroutines(events []*Event, goid uint64) map[uint64]bool {
gmap[0] = true // for GC events
return gmap
}
+
+func IsSystemGoroutine(entryFn string) bool {
+ // This mimics runtime.isSystemGoroutine as closely as
+ // possible.
+ return entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.")
+}
diff --git a/src/io/fs/glob.go b/src/io/fs/glob.go
index 45d9cb61b9..0e529cd05d 100644
--- a/src/io/fs/glob.go
+++ b/src/io/fs/glob.go
@@ -31,6 +31,16 @@ type GlobFS interface {
// Otherwise, Glob uses ReadDir to traverse the directory tree
// and look for matches for the pattern.
func Glob(fsys FS, pattern string) (matches []string, err error) {
+ return globWithLimit(fsys, pattern, 0)
+}
+
+func globWithLimit(fsys FS, pattern string, depth int) (matches []string, err error) {
+ // This limit is added to prevent stack exhaustion issues. See
+ // CVE-2022-30630.
+ const pathSeparatorsLimit = 10000
+ if depth > pathSeparatorsLimit {
+ return nil, path.ErrBadPattern
+ }
if fsys, ok := fsys.(GlobFS); ok {
return fsys.Glob(pattern)
}
@@ -59,9 +69,9 @@ func Glob(fsys FS, pattern string) (matches []string, err error) {
}
var m []string
- m, err = Glob(fsys, dir)
+ m, err = globWithLimit(fsys, dir, depth+1)
if err != nil {
- return
+ return nil, err
}
for _, d := range m {
matches, err = glob(fsys, d, file, matches)
diff --git a/src/io/fs/glob_test.go b/src/io/fs/glob_test.go
index f19bebed77..d052eab371 100644
--- a/src/io/fs/glob_test.go
+++ b/src/io/fs/glob_test.go
@@ -8,6 +8,7 @@ import (
. "io/fs"
"os"
"path"
+ "strings"
"testing"
)
@@ -55,6 +56,15 @@ func TestGlobError(t *testing.T) {
}
}
+func TestCVE202230630(t *testing.T) {
+ // Prior to CVE-2022-30630, a stack exhaustion would occur given a large
+ // number of separators. There is now a limit of 10,000.
+ _, err := Glob(os.DirFS("."), "/*"+strings.Repeat("/", 10001))
+ if err != path.ErrBadPattern {
+ t.Fatalf("Glob returned err=%v, want %v", err, path.ErrBadPattern)
+ }
+}
+
// contains reports whether vector contains the string s.
func contains(vector []string, s string) bool {
for _, elem := range vector {
diff --git a/src/net/http/fs.go b/src/net/http/fs.go
index 7a1d5f4be5..4f144ebad2 100644
--- a/src/net/http/fs.go
+++ b/src/net/http/fs.go
@@ -541,6 +541,7 @@ func writeNotModified(w ResponseWriter) {
h := w.Header()
delete(h, "Content-Type")
delete(h, "Content-Length")
+ delete(h, "Content-Encoding")
if h.Get("Etag") != "" {
delete(h, "Last-Modified")
}
diff --git a/src/net/http/fs_test.go b/src/net/http/fs_test.go
index d627dfd4be..4be561cdfa 100644
--- a/src/net/http/fs_test.go
+++ b/src/net/http/fs_test.go
@@ -564,6 +564,60 @@ func testServeFileWithContentEncoding(t *testing.T, h2 bool) {
}
}
+// Tests that ServeFile does not generate representation metadata when
+// file has not been modified, as per RFC 7232 section 4.1.
+func TestServeFileNotModified_h1(t *testing.T) { testServeFileNotModified(t, h1Mode) }
+func TestServeFileNotModified_h2(t *testing.T) { testServeFileNotModified(t, h2Mode) }
+func testServeFileNotModified(t *testing.T, h2 bool) {
+ defer afterTest(t)
+ cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Header().Set("Content-Encoding", "foo")
+ w.Header().Set("Etag", `"123"`)
+ ServeFile(w, r, "testdata/file")
+
+ // Because the testdata is so small, it would fit in
+ // both the h1 and h2 Server's write buffers. For h1,
+ // sendfile is used, though, forcing a header flush at
+ // the io.Copy. http2 doesn't do a header flush so
+ // buffers all 11 bytes and then adds its own
+ // Content-Length. To prevent the Server's
+ // Content-Length and test ServeFile only, flush here.
+ w.(Flusher).Flush()
+ }))
+ defer cst.close()
+ req, err := NewRequest("GET", cst.ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.Header.Set("If-None-Match", `"123"`)
+ resp, err := cst.c.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := io.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ t.Fatal("reading Body:", err)
+ }
+ if len(b) != 0 {
+ t.Errorf("non-empty body")
+ }
+ if g, e := resp.StatusCode, StatusNotModified; g != e {
+ t.Errorf("status mismatch: got %d, want %d", g, e)
+ }
+ // HTTP1 transport sets ContentLength to 0.
+ if g, e1, e2 := resp.ContentLength, int64(-1), int64(0); g != e1 && g != e2 {
+ t.Errorf("Content-Length mismatch: got %d, want %d or %d", g, e1, e2)
+ }
+ if resp.Header.Get("Content-Type") != "" {
+ t.Errorf("Content-Type present, but it should not be")
+ }
+ if resp.Header.Get("Content-Encoding") != "" {
+ t.Errorf("Content-Encoding present, but it should not be")
+ }
+}
+
func TestServeIndexHtml(t *testing.T) {
defer afterTest(t)
diff --git a/src/net/http/header.go b/src/net/http/header.go
index 6437f2d2c0..e0b342c63c 100644
--- a/src/net/http/header.go
+++ b/src/net/http/header.go
@@ -43,7 +43,8 @@ func (h Header) Set(key, value string) {
// Get gets the first value associated with the given key. If
// there are no values associated with the key, Get returns "".
// It is case insensitive; textproto.CanonicalMIMEHeaderKey is
-// used to canonicalize the provided key. To use non-canonical keys,
+// used to canonicalize the provided key. Get assumes that all
+// keys are stored in canonical form. To use non-canonical keys,
// access the map directly.
func (h Header) Get(key string) string {
return textproto.MIMEHeader(h).Get(key)
diff --git a/src/net/http/request.go b/src/net/http/request.go
index d091f3c056..cead91d3d4 100644
--- a/src/net/http/request.go
+++ b/src/net/http/request.go
@@ -1126,8 +1126,8 @@ func readRequest(b *bufio.Reader) (req *Request, err error) {
// MaxBytesReader is similar to io.LimitReader but is intended for
// limiting the size of incoming request bodies. In contrast to
// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
-// MaxBytesError for a Read beyond the limit, and closes the
-// underlying reader when its Close method is called.
+// non-nil error of type *MaxBytesError for a Read beyond the limit,
+// and closes the underlying reader when its Close method is called.
//
// MaxBytesReader prevents clients from accidentally or maliciously
// sending a large request and wasting server resources. If possible,
diff --git a/src/net/http/server.go b/src/net/http/server.go
index bc3a4633da..87dd412984 100644
--- a/src/net/http/server.go
+++ b/src/net/http/server.go
@@ -2690,6 +2690,8 @@ type Server struct {
activeConn map[*conn]struct{}
doneChan chan struct{}
onShutdown []func()
+
+ listenerGroup sync.WaitGroup
}
func (s *Server) getDoneChan() <-chan struct{} {
@@ -2732,6 +2734,15 @@ func (srv *Server) Close() error {
defer srv.mu.Unlock()
srv.closeDoneChanLocked()
err := srv.closeListenersLocked()
+
+ // Unlock srv.mu while waiting for listenerGroup.
+ // The group Add and Done calls are made with srv.mu held,
+ // to avoid adding a new listener in the window between
+ // us setting inShutdown above and waiting here.
+ srv.mu.Unlock()
+ srv.listenerGroup.Wait()
+ srv.mu.Lock()
+
for c := range srv.activeConn {
c.rwc.Close()
delete(srv.activeConn, c)
@@ -2778,6 +2789,7 @@ func (srv *Server) Shutdown(ctx context.Context) error {
go f()
}
srv.mu.Unlock()
+ srv.listenerGroup.Wait()
pollIntervalBase := time.Millisecond
nextPollInterval := func() time.Duration {
@@ -2794,7 +2806,7 @@ func (srv *Server) Shutdown(ctx context.Context) error {
timer := time.NewTimer(nextPollInterval())
defer timer.Stop()
for {
- if srv.closeIdleConns() && srv.numListeners() == 0 {
+ if srv.closeIdleConns() {
return lnerr
}
select {
@@ -2817,12 +2829,6 @@ func (srv *Server) RegisterOnShutdown(f func()) {
srv.mu.Unlock()
}
-func (s *Server) numListeners() int {
- s.mu.Lock()
- defer s.mu.Unlock()
- return len(s.listeners)
-}
-
// closeIdleConns closes all idle connections and reports whether the
// server is quiescent.
func (s *Server) closeIdleConns() bool {
@@ -3157,8 +3163,10 @@ func (s *Server) trackListener(ln *net.Listener, add bool) bool {
return false
}
s.listeners[ln] = struct{}{}
+ s.listenerGroup.Add(1)
} else {
delete(s.listeners, ln)
+ s.listenerGroup.Done()
}
return true
}
diff --git a/src/net/http/transport.go b/src/net/http/transport.go
index f2d538b04a..e470a6c080 100644
--- a/src/net/http/transport.go
+++ b/src/net/http/transport.go
@@ -525,7 +525,8 @@ func (t *Transport) roundTrip(req *Request) (*Response, error) {
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
req.closeBody()
- return nil, fmt.Errorf("net/http: invalid header field value %q for key %v", v, k)
+ // Don't include the value in the error, because it may be sensitive.
+ return nil, fmt.Errorf("net/http: invalid header field value for %q", k)
}
}
}
diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go
index 84868e2c5e..cba35db257 100644
--- a/src/net/http/transport_test.go
+++ b/src/net/http/transport_test.go
@@ -6085,14 +6085,14 @@ func TestTransportClosesBodyOnInvalidRequests(t *testing.T) {
Method: " ",
URL: u,
},
- wantErr: "invalid method",
+ wantErr: `invalid method " "`,
},
{
name: "nil URL",
req: &Request{
Method: "GET",
},
- wantErr: "nil Request.URL",
+ wantErr: `nil Request.URL`,
},
{
name: "invalid header key",
@@ -6101,7 +6101,7 @@ func TestTransportClosesBodyOnInvalidRequests(t *testing.T) {
Header: Header{"💡": {"emoji"}},
URL: u,
},
- wantErr: "invalid header field name",
+ wantErr: `invalid header field name "💡"`,
},
{
name: "invalid header value",
@@ -6110,7 +6110,7 @@ func TestTransportClosesBodyOnInvalidRequests(t *testing.T) {
Header: Header{"key": {"\x19"}},
URL: u,
},
- wantErr: "invalid header field value",
+ wantErr: `invalid header field value for "key"`,
},
{
name: "non HTTP(s) scheme",
@@ -6118,7 +6118,7 @@ func TestTransportClosesBodyOnInvalidRequests(t *testing.T) {
Method: "POST",
URL: &url.URL{Scheme: "faux"},
},
- wantErr: "unsupported protocol scheme",
+ wantErr: `unsupported protocol scheme "faux"`,
},
{
name: "no Host in URL",
@@ -6126,7 +6126,7 @@ func TestTransportClosesBodyOnInvalidRequests(t *testing.T) {
Method: "POST",
URL: &url.URL{Scheme: "http"},
},
- wantErr: "no Host",
+ wantErr: `no Host in request URL`,
},
}
@@ -6142,8 +6142,8 @@ func TestTransportClosesBodyOnInvalidRequests(t *testing.T) {
if !bc {
t.Fatal("Expected body to have been closed")
}
- if g, w := err.Error(), tt.wantErr; !strings.Contains(g, w) {
- t.Fatalf("Error mismatch\n\t%q\ndoes not contain\n\t%q", g, w)
+ if g, w := err.Error(), tt.wantErr; !strings.HasSuffix(g, w) {
+ t.Fatalf("Error mismatch: %q does not end with %q", g, w)
}
})
}
diff --git a/src/net/url/url.go b/src/net/url/url.go
index db4d6385e3..e82ae6aeef 100644
--- a/src/net/url/url.go
+++ b/src/net/url/url.go
@@ -1193,7 +1193,7 @@ func (u *URL) UnmarshalBinary(text []byte) error {
func (u *URL) JoinPath(elem ...string) *URL {
url := *u
if len(elem) > 0 {
- elem = append([]string{u.Path}, elem...)
+ elem = append([]string{u.EscapedPath()}, elem...)
p := path.Join(elem...)
// path.Join will remove any trailing slashes.
// Preserve at least one.
diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go
index 478cc34872..263eddffcf 100644
--- a/src/net/url/url_test.go
+++ b/src/net/url/url_test.go
@@ -2120,6 +2120,16 @@ func TestJoinPath(t *testing.T) {
out: "https://go.googlesource.com/",
},
{
+ base: "https://go.googlesource.com/a%2fb",
+ elem: []string{"c"},
+ out: "https://go.googlesource.com/a%2fb/c",
+ },
+ {
+ base: "https://go.googlesource.com/a%2fb",
+ elem: []string{"c%2fd"},
+ out: "https://go.googlesource.com/a%2fb/c%2fd",
+ },
+ {
base: "/",
elem: nil,
out: "/",
diff --git a/src/os/exec/exec.go b/src/os/exec/exec.go
index f0dc7dab7d..57d18420bb 100644
--- a/src/os/exec/exec.go
+++ b/src/os/exec/exec.go
@@ -462,8 +462,8 @@ func lookExtensions(path, dir string) (string, error) {
//
// If Start returns successfully, the c.Process field will be set.
//
-// The Wait method will return the exit code and release associated resources
-// once the command exits.
+// After a successful call to Start the Wait method must be called in
+// order to release associated system resources.
func (c *Cmd) Start() error {
if c.Path == "" && c.Err == nil && c.lookPathErr == nil {
c.Err = errors.New("exec: no command")
diff --git a/src/os/path_windows.go b/src/os/path_windows.go
index a96245f358..3356908a36 100644
--- a/src/os/path_windows.go
+++ b/src/os/path_windows.go
@@ -11,7 +11,7 @@ const (
// IsPathSeparator reports whether c is a directory separator character.
func IsPathSeparator(c uint8) bool {
- // NOTE: Windows accept / as path separator.
+ // NOTE: Windows accepts / as path separator.
return c == '\\' || c == '/'
}
diff --git a/src/path/filepath/match.go b/src/path/filepath/match.go
index 847a78133d..b5cc4b8cf3 100644
--- a/src/path/filepath/match.go
+++ b/src/path/filepath/match.go
@@ -240,6 +240,16 @@ func getEsc(chunk string) (r rune, nchunk string, err error) {
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
func Glob(pattern string) (matches []string, err error) {
+ return globWithLimit(pattern, 0)
+}
+
+func globWithLimit(pattern string, depth int) (matches []string, err error) {
+	// This limit is used to prevent stack exhaustion issues. See CVE-2022-30632.
+ const pathSeparatorsLimit = 10000
+ if depth == pathSeparatorsLimit {
+ return nil, ErrBadPattern
+ }
+
// Check pattern is well-formed.
if _, err := Match(pattern, ""); err != nil {
return nil, err
@@ -269,7 +279,7 @@ func Glob(pattern string) (matches []string, err error) {
}
var m []string
- m, err = Glob(dir)
+ m, err = globWithLimit(dir, depth+1)
if err != nil {
return
}
diff --git a/src/path/filepath/match_test.go b/src/path/filepath/match_test.go
index 375c41a7e9..d6282596fe 100644
--- a/src/path/filepath/match_test.go
+++ b/src/path/filepath/match_test.go
@@ -155,6 +155,16 @@ func TestGlob(t *testing.T) {
}
}
+func TestCVE202230632(t *testing.T) {
+ // Prior to CVE-2022-30632, this would cause a stack exhaustion given a
+ // large number of separators (more than 4,000,000). There is now a limit
+ // of 10,000.
+ _, err := Glob("/*" + strings.Repeat("/", 10001))
+ if err != ErrBadPattern {
+ t.Fatalf("Glob returned err=%v, want ErrBadPattern", err)
+ }
+}
+
func TestGlobError(t *testing.T) {
bad := []string{`[]`, `nonexist/[]`}
for _, pattern := range bad {
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index 7c785900db..1f484fb9b6 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -173,10 +173,6 @@ func (c *mcache) refill(spc spanClass) {
bytesAllocated := slotsUsed * int64(s.elemsize)
gcController.totalAlloc.Add(bytesAllocated)
- // Update heapLive and flush scanAlloc.
- gcController.update(bytesAllocated, int64(c.scanAlloc))
- c.scanAlloc = 0
-
// Clear the second allocCount just to be safe.
s.allocCountBeforeCache = 0
}
@@ -198,6 +194,23 @@ func (c *mcache) refill(spc spanClass) {
// Store the current alloc count for accounting later.
s.allocCountBeforeCache = s.allocCount
+ // Update heapLive and flush scanAlloc.
+ //
+ // We have not yet allocated anything new into the span, but we
+ // assume that all of its slots will get used, so this makes
+ // heapLive an overestimate.
+ //
+ // When the span gets uncached, we'll fix up this overestimate
+ // if necessary (see releaseAll).
+ //
+ // We pick an overestimate here because an underestimate leads
+ // the pacer to believe that it's in better shape than it is,
+ // which appears to lead to more memory used. See #53738 for
+ // more details.
+ usedBytes := uintptr(s.allocCount) * s.elemsize
+ gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
+ c.scanAlloc = 0
+
c.alloc[spc] = s
}
@@ -247,6 +260,8 @@ func (c *mcache) releaseAll() {
scanAlloc := int64(c.scanAlloc)
c.scanAlloc = 0
+ sg := mheap_.sweepgen
+ dHeapLive := int64(0)
for i := range c.alloc {
s := c.alloc[i]
if s != &emptymspan {
@@ -262,6 +277,15 @@ func (c *mcache) releaseAll() {
// We assumed earlier that the full span gets allocated.
gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize))
+ if s.sweepgen != sg+1 {
+ // refill conservatively counted unallocated slots in gcController.heapLive.
+ // Undo this.
+ //
+ // If this span was cached before sweep, then gcController.heapLive was totally
+ // recomputed since caching this span, so we don't do this for stale spans.
+ dHeapLive -= int64(uintptr(s.nelems)-uintptr(s.allocCount)) * int64(s.elemsize)
+ }
+
// Release the span to the mcentral.
mheap_.central[i].mcentral.uncacheSpan(s)
c.alloc[i] = &emptymspan
@@ -277,8 +301,8 @@ func (c *mcache) releaseAll() {
c.tinyAllocs = 0
memstats.heapStats.release()
- // Updated heapScan.
- gcController.update(0, scanAlloc)
+ // Update heapLive and heapScan.
+ gcController.update(dHeapLive, scanAlloc)
}
// prepareForSweep flushes c if the system has entered a new sweep phase
diff --git a/src/runtime/mgcpacer.go b/src/runtime/mgcpacer.go
index ac3446db36..2d9fd27748 100644
--- a/src/runtime/mgcpacer.go
+++ b/src/runtime/mgcpacer.go
@@ -439,7 +439,26 @@ func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger g
c.fractionalMarkTime = 0
c.idleMarkTime = 0
c.markStartTime = markStartTime
- c.triggered = c.heapLive
+
+ // TODO(mknyszek): This is supposed to be the actual trigger point for the heap, but
+ // causes regressions in memory use. The cause is that the PI controller used to smooth
+ // the cons/mark ratio measurements tends to flail when using the less accurate precomputed
+ // trigger for the cons/mark calculation, and this results in the controller being more
+ // conservative about steady-states it tries to find in the future.
+ //
+ // This conservatism is transient, but these transient states tend to matter for short-lived
+ // programs, especially because the PI controller is overdamped, partially because it is
+ // configured with a relatively large time constant.
+ //
+ // Ultimately, I think this is just two mistakes piled on one another: the choice of a swingy
+ // smoothing function that recalls a fairly long history (due to its overdamped time constant)
+ // coupled with an inaccurate cons/mark calculation. It just so happens this works better
+ // today, and it makes it harder to change things in the future.
+ //
+ // This is described in #53738. Fix this for #53892 by changing back to the actual trigger
+ // point and simplifying the smoothing function.
+ heapTrigger, heapGoal := c.trigger()
+ c.triggered = heapTrigger
// Compute the background mark utilization goal. In general,
// this may not come out exactly. We round the number of
@@ -501,7 +520,6 @@ func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger g
c.revise()
if debug.gcpacertrace > 0 {
- heapGoal := c.heapGoal()
assistRatio := c.assistWorkPerByte.Load()
print("pacer: assist ratio=", assistRatio,
" (scan ", gcController.heapScan>>20, " MB in ",
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index 6bf3ddda8c..99a67b9a3a 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -947,7 +947,7 @@ func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Point
goroutineProfile.active = true
goroutineProfile.records = p
goroutineProfile.labels = labels
- // The finializer goroutine needs special handling because it can vary over
+ // The finalizer goroutine needs special handling because it can vary over
// time between being a user goroutine (eligible for this profile) and a
// system goroutine (to be excluded). Pick one before restarting the world.
if fing != nil {
diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go
index 13bc3be4ab..f0e7c6ae70 100644
--- a/src/runtime/os_plan9.go
+++ b/src/runtime/os_plan9.go
@@ -439,7 +439,7 @@ func exit(e int32) {
var tmp [32]byte
sl := itoa(tmp[:len(tmp)-1], uint64(e))
// Don't append, rely on the existing data being zero.
- status = tmp[:len(sl)+1]
+ status = sl[:len(sl)+1]
}
goexitsall(&status[0])
exits(&status[0])
diff --git a/src/runtime/race/README b/src/runtime/race/README
index eb18ad600b..ad8f55fb73 100644
--- a/src/runtime/race/README
+++ b/src/runtime/race/README
@@ -4,9 +4,9 @@ the LLVM project (https://github.com/llvm/llvm-project/tree/main/compiler-rt).
To update the .syso files use golang.org/x/build/cmd/racebuild.
-race_darwin_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
-race_freebsd_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
-race_linux_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
+race_darwin_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6.
+race_freebsd_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6.
+race_linux_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6.
race_linux_ppc64le.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
race_netbsd_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
race_windows_amd64.syso built with LLVM 89f7ccea6f6488c443655880229c54db1f180153 and Go f62d3202bf9dbb3a00ad2a2c63ff4fa4188c5d3b.
diff --git a/src/runtime/race/race_darwin_amd64.syso b/src/runtime/race/race_darwin_amd64.syso
index dde17add91..e5d848c883 100644
--- a/src/runtime/race/race_darwin_amd64.syso
+++ b/src/runtime/race/race_darwin_amd64.syso
Binary files differ
diff --git a/src/runtime/race/race_freebsd_amd64.syso b/src/runtime/race/race_freebsd_amd64.syso
index 8be9ff7a64..b3a438347f 100644
--- a/src/runtime/race/race_freebsd_amd64.syso
+++ b/src/runtime/race/race_freebsd_amd64.syso
Binary files differ
diff --git a/src/runtime/race/race_linux_amd64.syso b/src/runtime/race/race_linux_amd64.syso
index a23064efac..6885610f25 100644
--- a/src/runtime/race/race_linux_amd64.syso
+++ b/src/runtime/race/race_linux_amd64.syso
Binary files differ
diff --git a/src/runtime/time.go b/src/runtime/time.go
index aec39083b4..80b0bfb72c 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -397,7 +397,11 @@ func dodeltimer(pp *p, i int) int {
if i == 0 {
updateTimer0When(pp)
}
- atomic.Xadd(&pp.numTimers, -1)
+ n := atomic.Xadd(&pp.numTimers, -1)
+ if n == 0 {
+ // If there are no timers, then clearly none are modified.
+ atomic.Store64(&pp.timerModifiedEarliest, 0)
+ }
return smallestChanged
}
@@ -421,7 +425,11 @@ func dodeltimer0(pp *p) {
siftdownTimer(pp.timers, 0)
}
updateTimer0When(pp)
- atomic.Xadd(&pp.numTimers, -1)
+ n := atomic.Xadd(&pp.numTimers, -1)
+ if n == 0 {
+ // If there are no timers, then clearly none are modified.
+ atomic.Store64(&pp.timerModifiedEarliest, 0)
+ }
}
// modtimer modifies an existing timer.
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 197683bc69..49147ff838 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -1120,7 +1120,7 @@ func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
// system (that is, the finalizer goroutine) is considered a user
// goroutine.
func isSystemGoroutine(gp *g, fixed bool) bool {
- // Keep this in sync with cmd/trace/trace.go:isSystemGoroutine.
+ // Keep this in sync with internal/trace.IsSystemGoroutine.
f := findfunc(gp.startpc)
if !f.valid() {
return false
diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go
index ede8247da9..554aad412d 100644
--- a/src/syscall/exec_linux.go
+++ b/src/syscall/exec_linux.go
@@ -43,8 +43,8 @@ type SysProcAttr struct {
// the descriptor of the controlling TTY.
// Unlike Setctty, in this case Ctty must be a descriptor
// number in the parent process.
- Foreground bool
- Pgid int // Child's process group ID if Setpgid.
+ Foreground bool
+ Pgid int // Child's process group ID if Setpgid.
// Pdeathsig, if non-zero, is a signal that the kernel will send to
// the child process when the creating thread dies. Note that the signal
// is sent on thread termination, which may happen before process termination.
diff --git a/test/fixedbugs/issue27938.go b/test/fixedbugs/issue27938.go
index 2589e1eff8..5392c65f1f 100644
--- a/test/fixedbugs/issue27938.go
+++ b/test/fixedbugs/issue27938.go
@@ -11,13 +11,13 @@
package p
type _ struct {
- F sync.Mutex // ERROR "undefined: sync|expected package"
+ F sync.Mutex // ERROR "undefined: sync|expected package|reference to undefined name"
}
type _ struct {
- sync.Mutex // ERROR "undefined: sync|expected package"
+ sync.Mutex // ERROR "undefined: sync|expected package|reference to undefined name"
}
type _ interface {
- sync.Mutex // ERROR "undefined: sync|expected package|expected signature or type name"
+ sync.Mutex // ERROR "undefined: sync|expected package|expected signature or type name|reference to undefined name"
}
diff --git a/test/fixedbugs/issue53600.go b/test/fixedbugs/issue53600.go
index fd3a9e5e47..ead40b57af 100644
--- a/test/fixedbugs/issue53600.go
+++ b/test/fixedbugs/issue53600.go
@@ -12,6 +12,7 @@ func main() {
f()
g()
h()
+ j(math.MinInt64)
}
func f() {
for i := int64(math.MaxInt64); i <= math.MaxInt64; i++ {
@@ -40,3 +41,13 @@ func h() {
println(i, i < 0)
}
}
+
+//go:noinline
+func j(i int64) {
+ for j := int64(math.MaxInt64); j <= i-1; j++ {
+ if j < 0 {
+ break
+ }
+ println(j)
+ }
+}
diff --git a/test/fixedbugs/issue53600.out b/test/fixedbugs/issue53600.out
index 5590c7dcfb..577b50fd2c 100644
--- a/test/fixedbugs/issue53600.out
+++ b/test/fixedbugs/issue53600.out
@@ -6,3 +6,4 @@ done
9223372036854775805 false
9223372036854775807 false
done
+9223372036854775807
diff --git a/test/fixedbugs/issue53635.go b/test/fixedbugs/issue53635.go
new file mode 100644
index 0000000000..bea5493805
--- /dev/null
+++ b/test/fixedbugs/issue53635.go
@@ -0,0 +1,31 @@
+// run
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+ f[int]()
+}
+
+func f[T any]() {
+ switch []T(nil) {
+ case nil:
+ default:
+ panic("FAIL")
+ }
+
+ switch (func() T)(nil) {
+ case nil:
+ default:
+ panic("FAIL")
+ }
+
+ switch (map[int]T)(nil) {
+ case nil:
+ default:
+ panic("FAIL")
+ }
+}
diff --git a/test/fixedbugs/issue53653.go b/test/fixedbugs/issue53653.go
new file mode 100644
index 0000000000..555f7da528
--- /dev/null
+++ b/test/fixedbugs/issue53653.go
@@ -0,0 +1,42 @@
+// run
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "math"
+
+func main() {
+ f()
+ g()
+ h()
+}
+func f() {
+ for i := int64(math.MinInt64); i >= math.MinInt64; i-- {
+ if i > 0 {
+ println("done")
+ return
+ }
+ println(i, i > 0)
+ }
+}
+func g() {
+ for i := int64(math.MinInt64) + 1; i >= math.MinInt64; i-- {
+ if i > 0 {
+ println("done")
+ return
+ }
+ println(i, i > 0)
+ }
+}
+func h() {
+ for i := int64(math.MinInt64) + 2; i >= math.MinInt64; i -= 2 {
+ if i > 0 {
+ println("done")
+ return
+ }
+ println(i, i > 0)
+ }
+}
diff --git a/test/fixedbugs/issue53653.out b/test/fixedbugs/issue53653.out
new file mode 100644
index 0000000000..f699392cf3
--- /dev/null
+++ b/test/fixedbugs/issue53653.out
@@ -0,0 +1,8 @@
+-9223372036854775808 false
+done
+-9223372036854775807 false
+-9223372036854775808 false
+done
+-9223372036854775806 false
+-9223372036854775808 false
+done
diff --git a/test/loopbce.go b/test/loopbce.go
index f0c9bd0f81..4ae9a6a630 100644
--- a/test/loopbce.go
+++ b/test/loopbce.go
@@ -3,6 +3,8 @@
package main
+import "math"
+
func f0a(a []int) int {
x := 0
for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$"
@@ -281,8 +283,8 @@ func d2(a [100]int) [100]int {
func d3(a [100]int) [100]int {
for i := 0; i <= 99; i++ { // ERROR "Induction variable: limits \[0,99\], increment 1$"
- for j := 0; j <= i-1; j++ { // ERROR "Induction variable: limits \[0,\?\], increment 1$"
- a[j] = 0 // ERROR "Proved IsInBounds$"
+ for j := 0; j <= i-1; j++ {
+ a[j] = 0
a[j+1] = 0 // ERROR "Proved IsInBounds$"
a[j+2] = 0
}
@@ -290,7 +292,61 @@ func d3(a [100]int) [100]int {
return a
}
-func nobce1() {
+func d4() {
+ for i := int64(math.MaxInt64 - 9); i < math.MaxInt64-2; i += 4 { // ERROR "Induction variable: limits \[9223372036854775798,9223372036854775805\), increment 4$"
+ useString("foo")
+ }
+ for i := int64(math.MaxInt64 - 8); i < math.MaxInt64-2; i += 4 { // ERROR "Induction variable: limits \[9223372036854775799,9223372036854775805\), increment 4$"
+ useString("foo")
+ }
+ for i := int64(math.MaxInt64 - 7); i < math.MaxInt64-2; i += 4 {
+ useString("foo")
+ }
+ for i := int64(math.MaxInt64 - 6); i < math.MaxInt64-2; i += 4 { // ERROR "Induction variable: limits \[9223372036854775801,9223372036854775805\), increment 4$"
+ useString("foo")
+ }
+ for i := int64(math.MaxInt64 - 9); i <= math.MaxInt64-2; i += 4 { // ERROR "Induction variable: limits \[9223372036854775798,9223372036854775805\], increment 4$"
+ useString("foo")
+ }
+ for i := int64(math.MaxInt64 - 8); i <= math.MaxInt64-2; i += 4 { // ERROR "Induction variable: limits \[9223372036854775799,9223372036854775805\], increment 4$"
+ useString("foo")
+ }
+ for i := int64(math.MaxInt64 - 7); i <= math.MaxInt64-2; i += 4 {
+ useString("foo")
+ }
+ for i := int64(math.MaxInt64 - 6); i <= math.MaxInt64-2; i += 4 {
+ useString("foo")
+ }
+}
+
+func d5() {
+ for i := int64(math.MinInt64 + 9); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \(-9223372036854775806,-9223372036854775799\], increment 4"
+ useString("foo")
+ }
+ for i := int64(math.MinInt64 + 8); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \(-9223372036854775806,-9223372036854775800\], increment 4"
+ useString("foo")
+ }
+ for i := int64(math.MinInt64 + 7); i > math.MinInt64+2; i -= 4 {
+ useString("foo")
+ }
+ for i := int64(math.MinInt64 + 6); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \(-9223372036854775806,-9223372036854775802\], increment 4"
+ useString("foo")
+ }
+ for i := int64(math.MinInt64 + 9); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775806,-9223372036854775799\], increment 4"
+ useString("foo")
+ }
+ for i := int64(math.MinInt64 + 8); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775806,-9223372036854775800\], increment 4"
+ useString("foo")
+ }
+ for i := int64(math.MinInt64 + 7); i >= math.MinInt64+2; i -= 4 {
+ useString("foo")
+ }
+ for i := int64(math.MinInt64 + 6); i >= math.MinInt64+2; i -= 4 {
+ useString("foo")
+ }
+}
+
+func bce1() {
// tests overflow of max-min
a := int64(9223372036854774057)
b := int64(-1547)
@@ -300,8 +356,7 @@ func nobce1() {
panic("invalid test: modulos should differ")
}
- for i := b; i < a; i += z {
- // No induction variable is possible because i will overflow a first iteration.
+ for i := b; i < a; i += z { // ERROR "Induction variable: limits \[-1547,9223372036854774057\), increment 1337"
useString("foobar")
}
}
diff --git a/test/run.go b/test/run.go
index 49c7c157bd..fc03e1796b 100644
--- a/test/run.go
+++ b/test/run.go
@@ -1966,7 +1966,6 @@ var types2Failures32Bit = setOf(
var go118Failures = setOf(
"typeparam/nested.go", // 1.18 compiler doesn't support function-local types with generics
"typeparam/issue51521.go", // 1.18 compiler produces bad panic message and link error
- "typeparam/issue53419.go", // 1.18 compiler mishandles generic selector resolution
"typeparam/mdempsky/16.go", // 1.18 compiler uses interface shape type in failed type assertions
"typeparam/mdempsky/17.go", // 1.18 compiler mishandles implicit conversions from range loops
"typeparam/mdempsky/18.go", // 1.18 compiler mishandles implicit conversions in select statements
diff --git a/test/typeparam/issue53762.go b/test/typeparam/issue53762.go
new file mode 100644
index 0000000000..4d95988854
--- /dev/null
+++ b/test/typeparam/issue53762.go
@@ -0,0 +1,18 @@
+// compile
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type Value[T any] interface {
+}
+
+func use[T any](v Value[T]) {
+ _, _ = v.(int)
+}
+
+func main() {
+ use(Value[int](1))
+}