From b9c20f65e5059965ce81dc3178665dde23478473 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 5 May 2023 15:41:03 -0400 Subject: [PATCH 001/109] gopls/internal/regtest/marker: migrate @diag to new marker tests This CL contains the result of a relentlessly unsatisfactory several hours trying to migrate the @diag markers to the new framework. Success was achieved for bad_test, generated, generator, noparse, rundespiteerrors, undeclared. Some trickier cases remain. Observations: - tests without go.mod files result in packages named after the absolute directory path. Is this intentional? - the new @diag markers ignore the kind and severity fields. Should we restore them? - new @diag markers are quite particular about ranges. - IIUC, the (old) no_diagnostics special case was unnecessary since the framework checks got=want not got>want. Deleted. This was supposed to be an easy preparatory step before fixing the issue below. Updates golang/go#59888 Change-Id: I5cd9c37804f1fd627186c03f5b5d4c24d336a285 Reviewed-on: https://go-review.googlesource.com/c/tools/+/492988 Run-TryBot: Alan Donovan gopls-CI: kokoro Reviewed-by: Robert Findley TryBot-Result: Gopher Robot --- gopls/internal/lsp/cache/load.go | 10 +++ gopls/internal/lsp/regtest/marker.go | 12 +++- .../lsp/testdata/analyzer/bad_test.go | 24 ------- .../lsp/testdata/generated/generated.go | 7 --- .../lsp/testdata/generated/generator.go | 5 -- gopls/internal/lsp/testdata/good/good0.go | 2 +- gopls/internal/lsp/testdata/good/good1.go | 2 +- .../lsp/testdata/noparse/noparse.go.in | 24 ------- .../rundespiteerrors/rundespiteerrors.go | 14 ----- .../internal/lsp/testdata/summary.txt.golden | 4 +- .../lsp/testdata/summary_go1.18.txt.golden | 4 +- .../lsp/testdata/summary_go1.21.txt.golden | 4 +- gopls/internal/lsp/testdata/undeclared/var.go | 14 ----- .../lsp/testdata/undeclared/var.go.golden | 51 --------------- gopls/internal/lsp/tests/util.go | 8 +-- .../marker/testdata/diagnostics/analyzers.txt | 32 ++++++++++ .../marker/testdata/diagnostics/generated.txt | 21 +++++++ .../marker/testdata/diagnostics/noparse.txt | 33 ++++++++++ .../marker/testdata/diagnostics/parseerr.txt | 26 ++++++++ .../testdata/diagnostics/rundespiteerrors.txt | 27 ++++++++ .../testdata/undeclaredname/undeclared.txt | 62 +++++++++++++++++++ 21 files changed, 230 insertions(+), 156 deletions(-) delete mode 100644 gopls/internal/lsp/testdata/analyzer/bad_test.go delete mode 100644 gopls/internal/lsp/testdata/generated/generated.go delete mode 100644 gopls/internal/lsp/testdata/generated/generator.go delete mode 100644 gopls/internal/lsp/testdata/rundespiteerrors/rundespiteerrors.go delete mode 100644 gopls/internal/lsp/testdata/undeclared/var.go delete mode 100644 gopls/internal/lsp/testdata/undeclared/var.go.golden create mode 100644 gopls/internal/regtest/marker/testdata/diagnostics/analyzers.txt create mode 100644 gopls/internal/regtest/marker/testdata/diagnostics/generated.txt create mode 100644 gopls/internal/regtest/marker/testdata/diagnostics/noparse.txt create mode 100644 gopls/internal/regtest/marker/testdata/diagnostics/parseerr.txt create mode 100644 gopls/internal/regtest/marker/testdata/diagnostics/rundespiteerrors.txt create mode 100644 gopls/internal/regtest/marker/testdata/undeclaredname/undeclared.txt diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go index ade21ea2857..e9e79beca05 100644 --- a/gopls/internal/lsp/cache/load.go +++ b/gopls/internal/lsp/cache/load.go @@ -9,6 +9,7 @@ import ( "context" "errors" "fmt" + "log" 
"path/filepath" "sort" "strings" @@ -486,6 +487,15 @@ func buildMetadata(updates map[PackageID]*source.Metadata, pkg *packages.Package DepsErrors: packagesinternal.GetDepsErrors(pkg), } + if strings.Contains(string(m.PkgPath), "var/folders") { + // On macOS, in marker tests, without a go.mod file, + // this statement is reached. ID, Name, and PkgPath + // take on values that match the LoadDir, such as: + // "/var/folders/fy/dn6v01n16zjdwsqy_8qfbbxr000_9w/T/TestMarkersdiagnosticsissue56943.txt2080018120/001/work". + // TODO(adonovan): find out why. + log.Printf("strange package path: m=%+v pkg=%+v", *m, *pkg) + } + updates[id] = m for _, filename := range pkg.CompiledGoFiles { diff --git a/gopls/internal/lsp/regtest/marker.go b/gopls/internal/lsp/regtest/marker.go index b6b8bda7f29..1d204fbbc26 100644 --- a/gopls/internal/lsp/regtest/marker.go +++ b/gopls/internal/lsp/regtest/marker.go @@ -132,7 +132,15 @@ var update = flag.Bool("update", false, "if set, update test data during marker // // - diag(location, regexp): specifies an expected diagnostic matching the // given regexp at the given location. The test runner requires -// a 1:1 correspondence between observed diagnostics and diag annotations +// a 1:1 correspondence between observed diagnostics and diag annotations. +// +// The marker must accurately represent the diagnostic's range. +// Use grouping parens in the location regular expression to indicate +// a portion in context. +// TODO(adonovan): make this less strict, like the old framework. +// +// TODO(adonovan): in the older marker framework, the annotation asserted +// two additional fields (source="compiler", kind="error"). Restore them? // // - def(src, dst location): perform a textDocument/definition request at // the src location, and check the result points to the dst location. @@ -1206,7 +1214,7 @@ func completeMarker(mark marker, src protocol.Location, want ...string) { } } -// defMarker implements the @godef marker, running textDocument/definition at +// defMarker implements the @def marker, running textDocument/definition at // the given src location and asserting that there is exactly one resulting // location, matching dst. // diff --git a/gopls/internal/lsp/testdata/analyzer/bad_test.go b/gopls/internal/lsp/testdata/analyzer/bad_test.go deleted file mode 100644 index b1724c66693..00000000000 --- a/gopls/internal/lsp/testdata/analyzer/bad_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package analyzer - -import ( - "fmt" - "sync" - "testing" - "time" -) - -func Testbad(t *testing.T) { //@diag("", "tests", "Testbad has malformed name: first letter after 'Test' must not be lowercase", "warning") - var x sync.Mutex - _ = x //@diag("x", "copylocks", "assignment copies lock value to _: sync.Mutex", "warning") - - printfWrapper("%s") //@diag(re`printfWrapper\(.*\)`, "printf", "golang.org/lsptests/analyzer.printfWrapper format %s reads arg #1, but call has 0 args", "warning") -} - -func printfWrapper(format string, args ...interface{}) { - fmt.Printf(format, args...) -} - -func _() { - now := time.Now() - fmt.Println(now.Format("2006-02-01")) //@diag("2006-02-01", "timeformat", "2006-02-01 should be 2006-01-02", "warning") -} diff --git a/gopls/internal/lsp/testdata/generated/generated.go b/gopls/internal/lsp/testdata/generated/generated.go deleted file mode 100644 index c7adc180409..00000000000 --- a/gopls/internal/lsp/testdata/generated/generated.go +++ /dev/null @@ -1,7 +0,0 @@ -package generated - -// Code generated by generator.go. DO NOT EDIT. 
- -func _() { - var y int //@diag("y", "compiler", "y declared (and|but) not used", "error") -} diff --git a/gopls/internal/lsp/testdata/generated/generator.go b/gopls/internal/lsp/testdata/generated/generator.go deleted file mode 100644 index 8e2a4fab722..00000000000 --- a/gopls/internal/lsp/testdata/generated/generator.go +++ /dev/null @@ -1,5 +0,0 @@ -package generated - -func _() { - var x int //@diag("x", "compiler", "x declared (and|but) not used", "error") -} diff --git a/gopls/internal/lsp/testdata/good/good0.go b/gopls/internal/lsp/testdata/good/good0.go index 89450a84543..666171b6724 100644 --- a/gopls/internal/lsp/testdata/good/good0.go +++ b/gopls/internal/lsp/testdata/good/good0.go @@ -1,4 +1,4 @@ -package good //@diag("package", "no_diagnostics", "", "error") +package good func stuff() { //@item(good_stuff, "stuff", "func()", "func"),prepare("stu", "stuff", "stuff") x := 5 diff --git a/gopls/internal/lsp/testdata/good/good1.go b/gopls/internal/lsp/testdata/good/good1.go index 624d8147af2..7d39629a727 100644 --- a/gopls/internal/lsp/testdata/good/good1.go +++ b/gopls/internal/lsp/testdata/good/good1.go @@ -1,4 +1,4 @@ -package good //@diag("package", "no_diagnostics", "", "error") +package good import ( "golang.org/lsptests/types" //@item(types_import, "types", "\"golang.org/lsptests/types\"", "package") diff --git a/gopls/internal/lsp/testdata/noparse/noparse.go.in b/gopls/internal/lsp/testdata/noparse/noparse.go.in index 8b0bfaa035c..e69de29bb2d 100644 --- a/gopls/internal/lsp/testdata/noparse/noparse.go.in +++ b/gopls/internal/lsp/testdata/noparse/noparse.go.in @@ -1,24 +0,0 @@ -package noparse - -// The type error was chosen carefully to exercise a type-error analyzer. -// We use the 'nonewvars' analyzer because the other candidates are tricky: -// -// - The 'unusedvariable' analyzer is disabled by default, so it is not -// consistently enabled across Test{LSP,CommandLine} tests, which -// both process this file. -// - The 'undeclaredname' analyzer depends on the text of the go/types -// "undeclared name" error, which changed in go1.20. -// - The 'noresultvalues' analyzer produces a diagnostic containing newlines, -// which breaks the parser used by TestCommandLine. -// -// This comment is all that remains of my afternoon. - -func bye(x int) { - x := 123 //@diag(":=", "nonewvars", "no new variables", "warning") -} - -func stuff() { - -} - -func .() {} //@diag(".", "syntax", "expected 'IDENT', found '.'", "error") diff --git a/gopls/internal/lsp/testdata/rundespiteerrors/rundespiteerrors.go b/gopls/internal/lsp/testdata/rundespiteerrors/rundespiteerrors.go deleted file mode 100644 index 783e9a55f17..00000000000 --- a/gopls/internal/lsp/testdata/rundespiteerrors/rundespiteerrors.go +++ /dev/null @@ -1,14 +0,0 @@ -package rundespiteerrors - -// This test verifies that analyzers without RunDespiteErrors are not -// executed on a package containing type errors (see issue #54762). -func _() { - // A type error. - _ = 1 + "" //@diag("1", "compiler", "mismatched types|cannot convert", "error") - - // A violation of an analyzer for which RunDespiteErrors=false: - // no diagnostic is produced; the diag comment is merely illustrative. 
- for _ = range "" { //diag("for _", "simplifyrange", "simplify range expression", "warning") - - } -} diff --git a/gopls/internal/lsp/testdata/summary.txt.golden b/gopls/internal/lsp/testdata/summary.txt.golden index 065ae182036..8301739e0e3 100644 --- a/gopls/internal/lsp/testdata/summary.txt.golden +++ b/gopls/internal/lsp/testdata/summary.txt.golden @@ -8,10 +8,10 @@ DeepCompletionsCount = 5 FuzzyCompletionsCount = 8 RankedCompletionsCount = 164 CaseSensitiveCompletionsCount = 4 -DiagnosticsCount = 39 +DiagnosticsCount = 24 FoldingRangesCount = 2 SemanticTokenCount = 3 -SuggestedFixCount = 76 +SuggestedFixCount = 73 MethodExtractionCount = 6 DefinitionsCount = 46 TypeDefinitionsCount = 18 diff --git a/gopls/internal/lsp/testdata/summary_go1.18.txt.golden b/gopls/internal/lsp/testdata/summary_go1.18.txt.golden index 569c29056e5..52fba365236 100644 --- a/gopls/internal/lsp/testdata/summary_go1.18.txt.golden +++ b/gopls/internal/lsp/testdata/summary_go1.18.txt.golden @@ -8,10 +8,10 @@ DeepCompletionsCount = 5 FuzzyCompletionsCount = 8 RankedCompletionsCount = 174 CaseSensitiveCompletionsCount = 4 -DiagnosticsCount = 39 +DiagnosticsCount = 24 FoldingRangesCount = 2 SemanticTokenCount = 3 -SuggestedFixCount = 82 +SuggestedFixCount = 79 MethodExtractionCount = 6 DefinitionsCount = 46 TypeDefinitionsCount = 18 diff --git a/gopls/internal/lsp/testdata/summary_go1.21.txt.golden b/gopls/internal/lsp/testdata/summary_go1.21.txt.golden index 06f77638e6c..0cb41b89553 100644 --- a/gopls/internal/lsp/testdata/summary_go1.21.txt.golden +++ b/gopls/internal/lsp/testdata/summary_go1.21.txt.golden @@ -8,10 +8,10 @@ DeepCompletionsCount = 5 FuzzyCompletionsCount = 8 RankedCompletionsCount = 174 CaseSensitiveCompletionsCount = 4 -DiagnosticsCount = 40 +DiagnosticsCount = 25 FoldingRangesCount = 2 SemanticTokenCount = 3 -SuggestedFixCount = 82 +SuggestedFixCount = 79 MethodExtractionCount = 6 DefinitionsCount = 46 TypeDefinitionsCount = 18 diff --git a/gopls/internal/lsp/testdata/undeclared/var.go b/gopls/internal/lsp/testdata/undeclared/var.go deleted file mode 100644 index 3fda582ce1f..00000000000 --- a/gopls/internal/lsp/testdata/undeclared/var.go +++ /dev/null @@ -1,14 +0,0 @@ -package undeclared - -func m() int { - z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "") - if 100 < 90 { - z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "") - z = 4 - } - for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "") - } - r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error") - return z -} diff --git a/gopls/internal/lsp/testdata/undeclared/var.go.golden b/gopls/internal/lsp/testdata/undeclared/var.go.golden deleted file mode 100644 index de5cbb42fbb..00000000000 --- a/gopls/internal/lsp/testdata/undeclared/var.go.golden +++ /dev/null @@ -1,51 +0,0 @@ --- suggestedfix_var_10_6 -- -package undeclared - -func m() int { - z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "") - if 100 < 90 { - z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "") - z = 4 - } - i := - for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "") - } - r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error") - 
return z -} - --- suggestedfix_var_4_12 -- -package undeclared - -func m() int { - y := - z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "") - if 100 < 90 { - z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "") - z = 4 - } - for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "") - } - r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error") - return z -} - --- suggestedfix_var_7_18 -- -package undeclared - -func m() int { - z, _ := 1+y, 11 //@diag("y", "compiler", "(undeclared name|undefined): y", "error"),suggestedfix("y", "quickfix", "") - n := - if 100 < 90 { - z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "(undeclared name|undefined): n", "error"),suggestedfix("n", "quickfix", "") - z = 4 - } - for i < 200 { //@diag("i", "compiler", "(undeclared name|undefined): i", "error"),suggestedfix("i", "quickfix", "") - } - r() //@diag("r", "compiler", "(undeclared name|undefined): r", "error") - return z -} - diff --git a/gopls/internal/lsp/tests/util.go b/gopls/internal/lsp/tests/util.go index 415a5fc22ab..b8da2c1a37a 100644 --- a/gopls/internal/lsp/tests/util.go +++ b/gopls/internal/lsp/tests/util.go @@ -93,17 +93,11 @@ func DiffLinks(mapper *protocol.Mapper, wantLinks []Link, gotLinks []protocol.Do } // CompareDiagnostics reports testing errors to t when the diagnostic set got -// does not match want. If the sole expectation has source "no_diagnostics", -// the test expects that no diagnostics were received for the given document. +// does not match want. func CompareDiagnostics(t *testing.T, uri span.URI, want, got []*source.Diagnostic) { t.Helper() fileName := path.Base(string(uri)) - // A special case to test that there are no diagnostics for a file. - if len(want) == 1 && want[0].Source == "no_diagnostics" { - want = nil - } - // Build a helper function to match an actual diagnostic to an overlapping // expected diagnostic (if any). unmatched := make([]*source.Diagnostic, len(want)) diff --git a/gopls/internal/regtest/marker/testdata/diagnostics/analyzers.txt b/gopls/internal/regtest/marker/testdata/diagnostics/analyzers.txt new file mode 100644 index 00000000000..6e7e4650578 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/diagnostics/analyzers.txt @@ -0,0 +1,32 @@ +Test of warning diagnostics from various analyzers: +tests, copylocks, printf, and timeformat. + +-- go.mod -- +module example.com +go 1.12 + +-- bad_test.go -- +package analyzer + +import ( + "fmt" + "sync" + "testing" + "time" +) + +func Testbad(t *testing.T) { //@diag("", re"Testbad has malformed name: first letter after 'Test' must not be lowercase") + var x sync.Mutex + _ = x //@diag("x", re"assignment copies lock value to _: sync.Mutex") + + printfWrapper("%s") //@diag(re`printfWrapper\(.*\)`, re"example.com.printfWrapper format %s reads arg #1, but call has 0 args") +} + +func printfWrapper(format string, args ...interface{}) { + fmt.Printf(format, args...) 
+} + +func _() { + now := time.Now() + fmt.Println(now.Format("2006-02-01")) //@diag("2006-02-01", re"2006-02-01 should be 2006-01-02") +} diff --git a/gopls/internal/regtest/marker/testdata/diagnostics/generated.txt b/gopls/internal/regtest/marker/testdata/diagnostics/generated.txt new file mode 100644 index 00000000000..bae69b1cd3a --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/diagnostics/generated.txt @@ -0,0 +1,21 @@ +Test of "undeclared" diagnostic in generated code. + +-- go.mod -- +module example.com +go 1.12 + +-- generated.go -- +package generated + +// Code generated by generator.go. DO NOT EDIT. + +func _() { + var y int //@diag("y", re"y declared (and|but) not used") +} + +-- generator.go -- +package generated + +func _() { + var x int //@diag("x", re"x declared (and|but) not used") +} diff --git a/gopls/internal/regtest/marker/testdata/diagnostics/noparse.txt b/gopls/internal/regtest/marker/testdata/diagnostics/noparse.txt new file mode 100644 index 00000000000..1cca8b788b7 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/diagnostics/noparse.txt @@ -0,0 +1,33 @@ +This test exercises diagnostics produced by different phases. + +The type error was chosen carefully to exercise a type-error analyzer. +We use the 'nonewvars' analyzer because the other candidates are tricky: + +- The 'unusedvariable' analyzer is disabled by default, so it is not + consistently enabled across Test{LSP,CommandLine} tests, which + both process this file. +- The 'undeclaredname' analyzer depends on the text of the go/types + "undeclared name" error, which changed in go1.20. +- The 'noresultvalues' analyzer produces a diagnostic containing newlines, + which breaks the parser used by TestCommandLine. + +This comment is all that remains of my afternoon. + +FWIW, the first diag is a (compiler, error), the second a (nonewvars, warning). + +-- go.mod -- +module example.com +go 1.12 + +-- noparse.go -- +package noparse + +func bye(x int) { + x := 123 //@diag(re"():=", re"no new variables") +} + +func stuff() { + +} + +func .() {} //@diag(re"func ()[.]", re"expected 'IDENT', found '.'") diff --git a/gopls/internal/regtest/marker/testdata/diagnostics/parseerr.txt b/gopls/internal/regtest/marker/testdata/diagnostics/parseerr.txt new file mode 100644 index 00000000000..671e76db636 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/diagnostics/parseerr.txt @@ -0,0 +1,26 @@ + +This test verifies that only parse errors are created. +we produce diagnostics related to mismatching +unexported interface methods in non-workspace packages. + +The type error was chosen to exercise the 'nonewvars' type-error analyzer. +(The 'undeclaredname' analyzer depends on the text of the go/types +"undeclared name" error, which changed in go1.20.) + +-- go.mod -- +module example.com +go 1.12 + +-- a.go -- +package a + +func bye(x int) { + x := 123 //@diag(re"():=", re"no new variables") +} + +func stuff() { + +} + +func .() {} //@diag(re"func ().", re"expected 'IDENT', found '.'") + diff --git a/gopls/internal/regtest/marker/testdata/diagnostics/rundespiteerrors.txt b/gopls/internal/regtest/marker/testdata/diagnostics/rundespiteerrors.txt new file mode 100644 index 00000000000..70e4ebba980 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/diagnostics/rundespiteerrors.txt @@ -0,0 +1,27 @@ +This test verifies that analyzers without RunDespiteErrors are not +executed on a package containing type errors (see issue #54762). 
+ +We require go1.18 because the range of the `1 + ""` go/types error +changed then, and the new @diag marker is quite particular. + +-- go.mod -- +module example.com +go 1.12 + +-- flags -- +-min_go=go1.18 + +-- a.go -- +package a + +func _() { + // A type error. + _ = 1 + "" //@diag(`1 + ""`, re"mismatched types|cannot convert") + + // A violation of an analyzer for which RunDespiteErrors=false: + // no (simplifyrange, warning) diagnostic is produced; the diag + // comment is merely illustrative. + for _ = range "" { //diag("for _", "simplify range expression", ) + + } +} diff --git a/gopls/internal/regtest/marker/testdata/undeclaredname/undeclared.txt b/gopls/internal/regtest/marker/testdata/undeclaredname/undeclared.txt new file mode 100644 index 00000000000..6dc27eefd85 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/undeclaredname/undeclared.txt @@ -0,0 +1,62 @@ +Tests of suggested fixes for "undeclared name" diagnostics, +which are of ("compiler", "error") type. + +-- go.mod -- +module example.com +go 1.12 + +-- a.go -- +package p + +func a() { + z, _ := 1+y, 11 //@suggestedfix("y", re"(undeclared name|undefined): y", "quickfix", a) + _ = z +} + +-- @a/a.go -- +package p + +func a() { + y := + z, _ := 1+y, 11 //@suggestedfix("y", re"(undeclared name|undefined): y", "quickfix", a) + _ = z +} + +-- b.go -- +package p + +func b() { + if 100 < 90 { + } else if 100 > n+2 { //@suggestedfix("n", re"(undeclared name|undefined): n", "quickfix", b) + } +} + +-- @b/b.go -- +package p + +func b() { + n := + if 100 < 90 { + } else if 100 > n+2 { //@suggestedfix("n", re"(undeclared name|undefined): n", "quickfix", b) + } +} + +-- c.go -- +package p + +func c() { + for i < 200 { //@suggestedfix("i", re"(undeclared name|undefined): i", "quickfix", c) + } + r() //@diag("r", re"(undeclared name|undefined): r") +} + +-- @c/c.go -- +package p + +func c() { + i := + for i < 200 { //@suggestedfix("i", re"(undeclared name|undefined): i", "quickfix", c) + } + r() //@diag("r", re"(undeclared name|undefined): r") +} + From 08b24db434d60aae2e79aa8a05a8252b3d6baca7 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 11 Apr 2023 14:30:31 -0400 Subject: [PATCH 002/109] gopls/internal/lsp/regtest: check for "// @marker" rogue space This is crude but nonetheless helpful check for a common mistake in marker tests. Change-Id: I46d1a1faf967cd42d2bf4284d4f3ad343d89add0 Reviewed-on: https://go-review.googlesource.com/c/tools/+/492737 Reviewed-by: Robert Findley Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan --- gopls/internal/lsp/regtest/marker.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/gopls/internal/lsp/regtest/marker.go b/gopls/internal/lsp/regtest/marker.go index 1d204fbbc26..59f47b991f6 100644 --- a/gopls/internal/lsp/regtest/marker.go +++ b/gopls/internal/lsp/regtest/marker.go @@ -627,9 +627,6 @@ func (g *Golden) Get(t testing.TB, name string, updated []byte) ([]byte, bool) { // // See the documentation for RunMarkerTests for more details on the test data // archive. -// -// TODO(rfindley): this test could sanity check the results. For example, it is -// too easy to write "// @" instead of "//@", which we will happy skip silently. 
func loadMarkerTests(dir string) ([]*markerTest, error) { var tests []*markerTest err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { @@ -638,6 +635,7 @@ func loadMarkerTests(dir string) ([]*markerTest, error) { if err != nil { return err } + name := strings.TrimPrefix(path, dir+string(filepath.Separator)) test, err := loadMarkerTest(name, content) if err != nil { @@ -708,6 +706,14 @@ func loadMarkerTest(name string, content []byte) (*markerTest, error) { if err != nil { return nil, fmt.Errorf("parsing notes in %q: %v", file.Name, err) } + + // Reject common misspelling: "// @mark". + // TODO(adonovan): permit "// @" within a string. Detect multiple spaces. + if i := bytes.Index(file.Data, []byte("// @")); i >= 0 { + line := 1 + bytes.Count(file.Data[:i], []byte("\n")) + return nil, fmt.Errorf("%s:%d: unwanted space before marker (// @)", file.Name, line) + } + test.notes = append(test.notes, notes...) test.files[file.Name] = file.Data } From 90e9c65995cc17c591d8389586d711f23fa5d859 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 8 May 2023 13:36:47 -0400 Subject: [PATCH 003/109] gopls/internal/lsp/cache: skip type errors after parse errors Parser error recovery can delete large swathes of source code; see golang/go#58833 for examples. Type checking syntax trees containing syntax errors may therefore result in a large number of spurious type errors. So, this change suppressed type errors in the presence of syntax errors. Fiddling with these tests is really surprisingly time consuming. Fixes golang/go#59888 Change-Id: Ib489ecf46652c5a346d9caad89fd059434c620f8 Reviewed-on: https://go-review.googlesource.com/c/tools/+/493616 TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Robert Findley Run-TryBot: Alan Donovan --- gopls/internal/lsp/cache/analysis.go | 12 +++++-- gopls/internal/lsp/cache/load.go | 10 ------ gopls/internal/lsp/regtest/marker.go | 1 + gopls/internal/lsp/source/view.go | 3 ++ .../lsp/testdata/badstmt/badstmt.go.in | 5 ++- .../internal/lsp/testdata/summary.txt.golden | 2 +- .../lsp/testdata/summary_go1.18.txt.golden | 2 +- .../lsp/testdata/summary_go1.21.txt.golden | 2 +- .../marker/testdata/diagnostics/noparse.txt | 33 ------------------- .../marker/testdata/diagnostics/parseerr.txt | 29 ++++++++-------- .../marker/testdata/diagnostics/typeerr.txt | 33 +++++++++++++++++++ .../marker/testdata/format/noparse.txt | 2 +- 12 files changed, 68 insertions(+), 66 deletions(-) delete mode 100644 gopls/internal/regtest/marker/testdata/diagnostics/noparse.txt create mode 100644 gopls/internal/regtest/marker/testdata/diagnostics/typeerr.txt diff --git a/gopls/internal/lsp/cache/analysis.go b/gopls/internal/lsp/cache/analysis.go index f7ad189a757..83ae05e430d 100644 --- a/gopls/internal/lsp/cache/analysis.go +++ b/gopls/internal/lsp/cache/analysis.go @@ -605,7 +605,6 @@ func actuallyAnalyze(ctx context.Context, analyzers []*analysis.Analyzer, m *sou // TODO(adonovan): port the old logic to: // - gather go/packages diagnostics from m.Errors? (port goPackagesErrorDiagnostics) - // - record unparseable file URIs so we can suppress type errors for these files. // - gather diagnostics from expandErrors + typeErrorDiagnostics + depsErrors. 
// -- analysis -- @@ -762,7 +761,16 @@ func typeCheckForAnalysis(fset *token.FileSet, parsed []*source.ParsedGoFile, m Sizes: m.TypesSizes, Error: func(e error) { pkg.compiles = false // type error - pkg.typeErrors = append(pkg.typeErrors, e.(types.Error)) + + // Suppress type errors in files with parse errors + // as parser recovery can be quite lossy (#59888). + typeError := e.(types.Error) + for _, p := range parsed { + if p.ParseErr != nil && source.NodeContains(p.File, typeError.Pos) { + return + } + } + pkg.typeErrors = append(pkg.typeErrors, typeError) }, Importer: importerFunc(func(importPath string) (*types.Package, error) { if importPath == "unsafe" { diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go index e9e79beca05..ade21ea2857 100644 --- a/gopls/internal/lsp/cache/load.go +++ b/gopls/internal/lsp/cache/load.go @@ -9,7 +9,6 @@ import ( "context" "errors" "fmt" - "log" "path/filepath" "sort" "strings" @@ -487,15 +486,6 @@ func buildMetadata(updates map[PackageID]*source.Metadata, pkg *packages.Package DepsErrors: packagesinternal.GetDepsErrors(pkg), } - if strings.Contains(string(m.PkgPath), "var/folders") { - // On macOS, in marker tests, without a go.mod file, - // this statement is reached. ID, Name, and PkgPath - // take on values that match the LoadDir, such as: - // "/var/folders/fy/dn6v01n16zjdwsqy_8qfbbxr000_9w/T/TestMarkersdiagnosticsissue56943.txt2080018120/001/work". - // TODO(adonovan): find out why. - log.Printf("strange package path: m=%+v pkg=%+v", *m, *pkg) - } - updates[id] = m for _, filename := range pkg.CompiledGoFiles { diff --git a/gopls/internal/lsp/regtest/marker.go b/gopls/internal/lsp/regtest/marker.go index 59f47b991f6..cea54b1de72 100644 --- a/gopls/internal/lsp/regtest/marker.go +++ b/gopls/internal/lsp/regtest/marker.go @@ -133,6 +133,7 @@ var update = flag.Bool("update", false, "if set, update test data during marker // - diag(location, regexp): specifies an expected diagnostic matching the // given regexp at the given location. The test runner requires // a 1:1 correspondence between observed diagnostics and diag annotations. +// The diagnostics source and kind fields are ignored, to reduce fuss. // // The marker must accurately represent the diagnostic's range. // Use grouping parens in the location regular expression to indicate diff --git a/gopls/internal/lsp/source/view.go b/gopls/internal/lsp/source/view.go index d736f3fe191..90288cd2847 100644 --- a/gopls/internal/lsp/source/view.go +++ b/gopls/internal/lsp/source/view.go @@ -530,6 +530,9 @@ type TidiedModule struct { // Metadata represents package metadata retrieved from go/packages. // The Deps* maps do not contain self-import edges. +// +// An ad-hoc package (without go.mod or GOPATH) has its ID, PkgPath, +// and LoadDir equal to the absolute path of its directory. type Metadata struct { ID PackageID PkgPath PackagePath diff --git a/gopls/internal/lsp/testdata/badstmt/badstmt.go.in b/gopls/internal/lsp/testdata/badstmt/badstmt.go.in index 81aee201d7f..3b8f9e06b39 100644 --- a/gopls/internal/lsp/testdata/badstmt/badstmt.go.in +++ b/gopls/internal/lsp/testdata/badstmt/badstmt.go.in @@ -4,13 +4,12 @@ import ( "golang.org/lsptests/foo" ) -// The nonewvars expectation asserts that the go/analysis framework ran. -// See comments in noparse. +// (The syntax error causes suppression of diagnostics for type errors. +// See issue #59888.) 
func _(x int) { defer foo.F //@complete(" //", Foo),diag(" //", "syntax", "function must be invoked in defer statement|expression in defer must be function call", "error") defer foo.F //@complete(" //", Foo) - x := 123 //@diag(":=", "nonewvars", "no new variables", "warning") } func _() { diff --git a/gopls/internal/lsp/testdata/summary.txt.golden b/gopls/internal/lsp/testdata/summary.txt.golden index 8301739e0e3..c572e268f7f 100644 --- a/gopls/internal/lsp/testdata/summary.txt.golden +++ b/gopls/internal/lsp/testdata/summary.txt.golden @@ -8,7 +8,7 @@ DeepCompletionsCount = 5 FuzzyCompletionsCount = 8 RankedCompletionsCount = 164 CaseSensitiveCompletionsCount = 4 -DiagnosticsCount = 24 +DiagnosticsCount = 23 FoldingRangesCount = 2 SemanticTokenCount = 3 SuggestedFixCount = 73 diff --git a/gopls/internal/lsp/testdata/summary_go1.18.txt.golden b/gopls/internal/lsp/testdata/summary_go1.18.txt.golden index 52fba365236..da3b553834c 100644 --- a/gopls/internal/lsp/testdata/summary_go1.18.txt.golden +++ b/gopls/internal/lsp/testdata/summary_go1.18.txt.golden @@ -8,7 +8,7 @@ DeepCompletionsCount = 5 FuzzyCompletionsCount = 8 RankedCompletionsCount = 174 CaseSensitiveCompletionsCount = 4 -DiagnosticsCount = 24 +DiagnosticsCount = 23 FoldingRangesCount = 2 SemanticTokenCount = 3 SuggestedFixCount = 79 diff --git a/gopls/internal/lsp/testdata/summary_go1.21.txt.golden b/gopls/internal/lsp/testdata/summary_go1.21.txt.golden index 0cb41b89553..52fba365236 100644 --- a/gopls/internal/lsp/testdata/summary_go1.21.txt.golden +++ b/gopls/internal/lsp/testdata/summary_go1.21.txt.golden @@ -8,7 +8,7 @@ DeepCompletionsCount = 5 FuzzyCompletionsCount = 8 RankedCompletionsCount = 174 CaseSensitiveCompletionsCount = 4 -DiagnosticsCount = 25 +DiagnosticsCount = 24 FoldingRangesCount = 2 SemanticTokenCount = 3 SuggestedFixCount = 79 diff --git a/gopls/internal/regtest/marker/testdata/diagnostics/noparse.txt b/gopls/internal/regtest/marker/testdata/diagnostics/noparse.txt deleted file mode 100644 index 1cca8b788b7..00000000000 --- a/gopls/internal/regtest/marker/testdata/diagnostics/noparse.txt +++ /dev/null @@ -1,33 +0,0 @@ -This test exercises diagnostics produced by different phases. - -The type error was chosen carefully to exercise a type-error analyzer. -We use the 'nonewvars' analyzer because the other candidates are tricky: - -- The 'unusedvariable' analyzer is disabled by default, so it is not - consistently enabled across Test{LSP,CommandLine} tests, which - both process this file. -- The 'undeclaredname' analyzer depends on the text of the go/types - "undeclared name" error, which changed in go1.20. -- The 'noresultvalues' analyzer produces a diagnostic containing newlines, - which breaks the parser used by TestCommandLine. - -This comment is all that remains of my afternoon. - -FWIW, the first diag is a (compiler, error), the second a (nonewvars, warning). - --- go.mod -- -module example.com -go 1.12 - --- noparse.go -- -package noparse - -func bye(x int) { - x := 123 //@diag(re"():=", re"no new variables") -} - -func stuff() { - -} - -func .() {} //@diag(re"func ()[.]", re"expected 'IDENT', found '.'") diff --git a/gopls/internal/regtest/marker/testdata/diagnostics/parseerr.txt b/gopls/internal/regtest/marker/testdata/diagnostics/parseerr.txt index 671e76db636..d0df08d8b25 100644 --- a/gopls/internal/regtest/marker/testdata/diagnostics/parseerr.txt +++ b/gopls/internal/regtest/marker/testdata/diagnostics/parseerr.txt @@ -1,26 +1,27 @@ -This test verifies that only parse errors are created. 
-we produce diagnostics related to mismatching -unexported interface methods in non-workspace packages. +This test exercises diagnostics produced for syntax errors. -The type error was chosen to exercise the 'nonewvars' type-error analyzer. -(The 'undeclaredname' analyzer depends on the text of the go/types -"undeclared name" error, which changed in go1.20.) +Because parser error recovery can be quite lossy, diagnostics +for type errors are suppressed in files with syntax errors; +see issue #59888. But diagnostics are reported for type errors +in well-formed files of the same package. -- go.mod -- module example.com go 1.12 --- a.go -- -package a +-- bad.go -- +package p -func bye(x int) { - x := 123 //@diag(re"():=", re"no new variables") -} - -func stuff() { - +func f() { + append("") // no diagnostic for type error in file containing syntax error } func .() {} //@diag(re"func ().", re"expected 'IDENT', found '.'") +-- good.go -- +package p + +func g() { + append("") //@diag(re`""`, re"a slice") +} diff --git a/gopls/internal/regtest/marker/testdata/diagnostics/typeerr.txt b/gopls/internal/regtest/marker/testdata/diagnostics/typeerr.txt new file mode 100644 index 00000000000..345c48e420a --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/diagnostics/typeerr.txt @@ -0,0 +1,33 @@ + +This test exercises diagnostics produced for type errors +in the absence of syntax errors. + +The type error was chosen to exercise the 'nonewvars' type-error analyzer. +(The 'undeclaredname' analyzer depends on the text of the go/types +"undeclared name" error, which changed in go1.20.) + +The append() type error was also carefully chosen to have text and +position that are invariant across all versions of Go run by the builders. + +-- go.mod -- +module example.com +go 1.12 + +-- typeerr.go -- +package a + +func f(x int) { + append("") //@diag(re`""`, re"a slice") + + x := 123 //@diag(re"x := 123", re"no new variables"), suggestedfix(re"():", re"no new variables", "quickfix", fix) +} + +-- @fix/typeerr.go -- +package a + +func f(x int) { + append("") //@diag(re`""`, re"a slice") + + x = 123 //@diag(re"x := 123", re"no new variables"), suggestedfix(re"():", re"no new variables", "quickfix", fix) +} + diff --git a/gopls/internal/regtest/marker/testdata/format/noparse.txt b/gopls/internal/regtest/marker/testdata/format/noparse.txt index 51059e83533..afc96cc1ef3 100644 --- a/gopls/internal/regtest/marker/testdata/format/noparse.txt +++ b/gopls/internal/regtest/marker/testdata/format/noparse.txt @@ -21,7 +21,7 @@ func what() { var hi func() if { hi() //@diag(re"(){", re".*missing.*") } - hi := nil //@diag(re"():=", re"no new variables") + hi := nil } -- @noparse -- 7:5: missing condition in if statement From 2310848948fa8dc1473d718e561c6837dec703ec Mon Sep 17 00:00:00 2001 From: Tim King Date: Fri, 5 May 2023 15:01:28 -0700 Subject: [PATCH 004/109] go/ssa: reindent test cases in TestGenericBodies Change-Id: Icbd0cd9ea7b1b42fa8ed7fe222e83748a1cb8e21 Reviewed-on: https://go-review.googlesource.com/c/tools/+/493097 TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan Run-TryBot: Tim King gopls-CI: kokoro --- go/ssa/builder_generic_test.go | 118 ++++++++++++++++----------------- 1 file changed, 59 insertions(+), 59 deletions(-) diff --git a/go/ssa/builder_generic_test.go b/go/ssa/builder_generic_test.go index 515ac012308..52fbd67175d 100644 --- a/go/ssa/builder_generic_test.go +++ b/go/ssa/builder_generic_test.go @@ -36,84 +36,84 @@ func TestGenericBodies(t *testing.T) { } for _, contents := range []string{ ` 
- package p + package p - func f(x int) { - var i interface{} - print(i, 0) //@ types("interface{}", int) - print() //@ types() - print(x) //@ types(int) - } - `, + func f(x int) { + var i interface{} + print(i, 0) //@ types("interface{}", int) + print() //@ types() + print(x) //@ types(int) + } + `, ` - package q + package q - func f[T any](x T) { - print(x) //@ types(T) - } - `, + func f[T any](x T) { + print(x) //@ types(T) + } + `, ` - package r + package r - func f[T ~int]() { - var x T - print(x) //@ types(T) - } - `, + func f[T ~int]() { + var x T + print(x) //@ types(T) + } + `, ` - package s + package s - func a[T ~[4]byte](x T) { - for k, v := range x { - print(x, k, v) //@ types(T, int, byte) - } + func a[T ~[4]byte](x T) { + for k, v := range x { + print(x, k, v) //@ types(T, int, byte) } - func b[T ~*[4]byte](x T) { - for k, v := range x { - print(x, k, v) //@ types(T, int, byte) - } + } + func b[T ~*[4]byte](x T) { + for k, v := range x { + print(x, k, v) //@ types(T, int, byte) } - func c[T ~[]byte](x T) { - for k, v := range x { - print(x, k, v) //@ types(T, int, byte) - } + } + func c[T ~[]byte](x T) { + for k, v := range x { + print(x, k, v) //@ types(T, int, byte) } - func d[T ~string](x T) { - for k, v := range x { - print(x, k, v) //@ types(T, int, rune) - } + } + func d[T ~string](x T) { + for k, v := range x { + print(x, k, v) //@ types(T, int, rune) } - func e[T ~map[int]string](x T) { - for k, v := range x { - print(x, k, v) //@ types(T, int, string) - } + } + func e[T ~map[int]string](x T) { + for k, v := range x { + print(x, k, v) //@ types(T, int, string) } - func f[T ~chan string](x T) { - for v := range x { - print(x, v) //@ types(T, string) - } + } + func f[T ~chan string](x T) { + for v := range x { + print(x, v) //@ types(T, string) } + } - func From() { - type A [4]byte - print(a[A]) //@ types("func(x s.A)") + func From() { + type A [4]byte + print(a[A]) //@ types("func(x s.A)") - type B *[4]byte - print(b[B]) //@ types("func(x s.B)") + type B *[4]byte + print(b[B]) //@ types("func(x s.B)") - type C []byte - print(c[C]) //@ types("func(x s.C)") + type C []byte + print(c[C]) //@ types("func(x s.C)") - type D string - print(d[D]) //@ types("func(x s.D)") + type D string + print(d[D]) //@ types("func(x s.D)") - type E map[int]string - print(e[E]) //@ types("func(x s.E)") + type E map[int]string + print(e[E]) //@ types("func(x s.E)") - type F chan string - print(f[F]) //@ types("func(x s.F)") - } - `, + type F chan string + print(f[F]) //@ types("func(x s.F)") + } + `, ` package t From 033e628ac81367121856a18b320c04d589252408 Mon Sep 17 00:00:00 2001 From: Tim King Date: Fri, 5 May 2023 15:14:58 -0700 Subject: [PATCH 005/109] go/ssa: more reindentation of TestGenericBodies This was split into two parts to be easier to review in gerrit. 
Change-Id: Ie308e06d9c49836f49451006075a3d6fd10cfaee Reviewed-on: https://go-review.googlesource.com/c/tools/+/493057 Reviewed-by: Alan Donovan Run-TryBot: Tim King TryBot-Result: Gopher Robot gopls-CI: kokoro --- go/ssa/builder_generic_test.go | 536 ++++++++++++++++----------------- 1 file changed, 268 insertions(+), 268 deletions(-) diff --git a/go/ssa/builder_generic_test.go b/go/ssa/builder_generic_test.go index 52fbd67175d..272a9587f30 100644 --- a/go/ssa/builder_generic_test.go +++ b/go/ssa/builder_generic_test.go @@ -115,317 +115,317 @@ func TestGenericBodies(t *testing.T) { } `, ` - package t + package t - func f[S any, T ~chan S](x T) { - for v := range x { - print(x, v) //@ types(T, S) - } + func f[S any, T ~chan S](x T) { + for v := range x { + print(x, v) //@ types(T, S) } + } - func From() { - type F chan string - print(f[string, F]) //@ types("func(x t.F)") - } - `, + func From() { + type F chan string + print(f[string, F]) //@ types("func(x t.F)") + } + `, ` - package u - - func fibonacci[T ~chan int](c, quit T) { - x, y := 0, 1 - for { - select { - case c <- x: - x, y = y, x+y - case <-quit: - print(c, quit, x, y) //@ types(T, T, int, int) - return - } + package u + + func fibonacci[T ~chan int](c, quit T) { + x, y := 0, 1 + for { + select { + case c <- x: + x, y = y, x+y + case <-quit: + print(c, quit, x, y) //@ types(T, T, int, int) + return } } - func start[T ~chan int](c, quit T) { - go func() { - for i := 0; i < 10; i++ { - print(<-c) //@ types(int) - } - quit <- 0 - }() - } - func From() { - type F chan int - c := make(F) - quit := make(F) - print(start[F], c, quit) //@ types("func(c u.F, quit u.F)", "u.F", "u.F") - print(fibonacci[F], c, quit) //@ types("func(c u.F, quit u.F)", "u.F", "u.F") - } - `, + } + func start[T ~chan int](c, quit T) { + go func() { + for i := 0; i < 10; i++ { + print(<-c) //@ types(int) + } + quit <- 0 + }() + } + func From() { + type F chan int + c := make(F) + quit := make(F) + print(start[F], c, quit) //@ types("func(c u.F, quit u.F)", "u.F", "u.F") + print(fibonacci[F], c, quit) //@ types("func(c u.F, quit u.F)", "u.F", "u.F") + } + `, ` - package v + package v - func f[T ~struct{ x int; y string }](i int) T { - u := []T{ T{0, "lorem"}, T{1, "ipsum"}} - return u[i] - } - func From() { - type S struct{ x int; y string } - print(f[S]) //@ types("func(i int) v.S") - } - `, + func f[T ~struct{ x int; y string }](i int) T { + u := []T{ T{0, "lorem"}, T{1, "ipsum"}} + return u[i] + } + func From() { + type S struct{ x int; y string } + print(f[S]) //@ types("func(i int) v.S") + } + `, ` - package w + package w - func f[T ~[4]int8](x T, l, h int) []int8 { - return x[l:h] - } - func g[T ~*[4]int16](x T, l, h int) []int16 { - return x[l:h] - } - func h[T ~[]int32](x T, l, h int) T { - return x[l:h] - } - func From() { - type F [4]int8 - type G *[4]int16 - type H []int32 - print(f[F](F{}, 0, 0)) //@ types("[]int8") - print(g[G](nil, 0, 0)) //@ types("[]int16") - print(h[H](nil, 0, 0)) //@ types("w.H") - } - `, + func f[T ~[4]int8](x T, l, h int) []int8 { + return x[l:h] + } + func g[T ~*[4]int16](x T, l, h int) []int16 { + return x[l:h] + } + func h[T ~[]int32](x T, l, h int) T { + return x[l:h] + } + func From() { + type F [4]int8 + type G *[4]int16 + type H []int32 + print(f[F](F{}, 0, 0)) //@ types("[]int8") + print(g[G](nil, 0, 0)) //@ types("[]int16") + print(h[H](nil, 0, 0)) //@ types("w.H") + } + `, ` - package x + package x - func h[E any, T ~[]E](x T, l, h int) []E { - s := x[l:h] - print(s) //@ types("T") - return s - } - func From() { - 
type H []int32 - print(h[int32, H](nil, 0, 0)) //@ types("[]int32") - } - `, + func h[E any, T ~[]E](x T, l, h int) []E { + s := x[l:h] + print(s) //@ types("T") + return s + } + func From() { + type H []int32 + print(h[int32, H](nil, 0, 0)) //@ types("[]int32") + } + `, ` - package y - - // Test "make" builtin with different forms on core types and - // when capacities are constants or variable. - func h[E any, T ~[]E](m, n int) { - print(make(T, 3)) //@ types(T) - print(make(T, 3, 5)) //@ types(T) - print(make(T, m)) //@ types(T) - print(make(T, m, n)) //@ types(T) - } - func i[K comparable, E any, T ~map[K]E](m int) { - print(make(T)) //@ types(T) - print(make(T, 5)) //@ types(T) - print(make(T, m)) //@ types(T) - } - func j[E any, T ~chan E](m int) { - print(make(T)) //@ types(T) - print(make(T, 6)) //@ types(T) - print(make(T, m)) //@ types(T) - } - func From() { - type H []int32 - h[int32, H](3, 4) - type I map[int8]H - i[int8, H, I](5) - type J chan I - j[I, J](6) - } - `, + package y + + // Test "make" builtin with different forms on core types and + // when capacities are constants or variable. + func h[E any, T ~[]E](m, n int) { + print(make(T, 3)) //@ types(T) + print(make(T, 3, 5)) //@ types(T) + print(make(T, m)) //@ types(T) + print(make(T, m, n)) //@ types(T) + } + func i[K comparable, E any, T ~map[K]E](m int) { + print(make(T)) //@ types(T) + print(make(T, 5)) //@ types(T) + print(make(T, m)) //@ types(T) + } + func j[E any, T ~chan E](m int) { + print(make(T)) //@ types(T) + print(make(T, 6)) //@ types(T) + print(make(T, m)) //@ types(T) + } + func From() { + type H []int32 + h[int32, H](3, 4) + type I map[int8]H + i[int8, H, I](5) + type J chan I + j[I, J](6) + } + `, ` - package z + package z - func h[T ~[4]int](x T) { - print(len(x), cap(x)) //@ types(int, int) - } - func i[T ~[4]byte | []int | ~chan uint8](x T) { - print(len(x), cap(x)) //@ types(int, int) - } - func j[T ~[4]int | any | map[string]int]() { - print(new(T)) //@ types("*T") - } - func k[T ~[4]int | any | map[string]int](x T) { - print(x) //@ types(T) - panic(x) - } - `, + func h[T ~[4]int](x T) { + print(len(x), cap(x)) //@ types(int, int) + } + func i[T ~[4]byte | []int | ~chan uint8](x T) { + print(len(x), cap(x)) //@ types(int, int) + } + func j[T ~[4]int | any | map[string]int]() { + print(new(T)) //@ types("*T") + } + func k[T ~[4]int | any | map[string]int](x T) { + print(x) //@ types(T) + panic(x) + } + `, ` - package a + package a - func f[E any, F ~func() E](x F) { - print(x, x()) //@ types(F, E) - } - func From() { - type T func() int - f[int, T](func() int { return 0 }) - f[int, func() int](func() int { return 1 }) - } - `, + func f[E any, F ~func() E](x F) { + print(x, x()) //@ types(F, E) + } + func From() { + type T func() int + f[int, T](func() int { return 0 }) + f[int, func() int](func() int { return 1 }) + } + `, ` - package b + package b - func f[E any, M ~map[string]E](m M) { - y, ok := m["lorem"] - print(m, y, ok) //@ types(M, E, bool) - } - func From() { - type O map[string][]int - f(O{"lorem": []int{0, 1, 2, 3}}) - } - `, + func f[E any, M ~map[string]E](m M) { + y, ok := m["lorem"] + print(m, y, ok) //@ types(M, E, bool) + } + func From() { + type O map[string][]int + f(O{"lorem": []int{0, 1, 2, 3}}) + } + `, ` - package c + package c - func a[T interface{ []int64 | [5]int64 }](x T) int64 { - print(x, x[2], x[3]) //@ types(T, int64, int64) - x[2] = 5 - return x[3] - } - func b[T interface{ []byte | string }](x T) byte { - print(x, x[3]) //@ types(T, byte) - return x[3] - } - func 
c[T interface{ []byte }](x T) byte { - print(x, x[2], x[3]) //@ types(T, byte, byte) - x[2] = 'b' - return x[3] - } - func d[T interface{ map[int]int64 }](x T) int64 { - print(x, x[2], x[3]) //@ types(T, int64, int64) - x[2] = 43 - return x[3] - } - func e[T ~string](t T) { - print(t, t[0]) //@ types(T, uint8) - } - func f[T ~string|[]byte](t T) { - print(t, t[0]) //@ types(T, uint8) - } - func g[T []byte](t T) { - print(t, t[0]) //@ types(T, byte) - } - func h[T ~[4]int|[]int](t T) { - print(t, t[0]) //@ types(T, int) - } - func i[T ~[4]int|*[4]int|[]int](t T) { - print(t, t[0]) //@ types(T, int) - } - func j[T ~[4]int|*[4]int|[]int](t T) { - print(t, &t[0]) //@ types(T, "*int") - } - `, + func a[T interface{ []int64 | [5]int64 }](x T) int64 { + print(x, x[2], x[3]) //@ types(T, int64, int64) + x[2] = 5 + return x[3] + } + func b[T interface{ []byte | string }](x T) byte { + print(x, x[3]) //@ types(T, byte) + return x[3] + } + func c[T interface{ []byte }](x T) byte { + print(x, x[2], x[3]) //@ types(T, byte, byte) + x[2] = 'b' + return x[3] + } + func d[T interface{ map[int]int64 }](x T) int64 { + print(x, x[2], x[3]) //@ types(T, int64, int64) + x[2] = 43 + return x[3] + } + func e[T ~string](t T) { + print(t, t[0]) //@ types(T, uint8) + } + func f[T ~string|[]byte](t T) { + print(t, t[0]) //@ types(T, uint8) + } + func g[T []byte](t T) { + print(t, t[0]) //@ types(T, byte) + } + func h[T ~[4]int|[]int](t T) { + print(t, t[0]) //@ types(T, int) + } + func i[T ~[4]int|*[4]int|[]int](t T) { + print(t, t[0]) //@ types(T, int) + } + func j[T ~[4]int|*[4]int|[]int](t T) { + print(t, &t[0]) //@ types(T, "*int") + } + `, ` - package d + package d - type MyInt int - type Other int - type MyInterface interface{ foo() } + type MyInt int + type Other int + type MyInterface interface{ foo() } - // ChangeType tests - func ct0(x int) { v := MyInt(x); print(x, v) /*@ types(int, "d.MyInt")*/ } - func ct1[T MyInt | Other, S int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } - func ct2[T int, S MyInt | int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } - func ct3[T MyInt | Other, S MyInt | int ](x S) { v := T(x) ; print(x, v) /*@ types(S, T)*/ } + // ChangeType tests + func ct0(x int) { v := MyInt(x); print(x, v) /*@ types(int, "d.MyInt")*/ } + func ct1[T MyInt | Other, S int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } + func ct2[T int, S MyInt | int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } + func ct3[T MyInt | Other, S MyInt | int ](x S) { v := T(x) ; print(x, v) /*@ types(S, T)*/ } - // Convert tests - func co0[T int | int8](x MyInt) { v := T(x); print(x, v) /*@ types("d.MyInt", T)*/} - func co1[T int | int8](x T) { v := MyInt(x); print(x, v) /*@ types(T, "d.MyInt")*/ } - func co2[S, T int | int8](x T) { v := S(x); print(x, v) /*@ types(T, S)*/ } + // Convert tests + func co0[T int | int8](x MyInt) { v := T(x); print(x, v) /*@ types("d.MyInt", T)*/} + func co1[T int | int8](x T) { v := MyInt(x); print(x, v) /*@ types(T, "d.MyInt")*/ } + func co2[S, T int | int8](x T) { v := S(x); print(x, v) /*@ types(T, S)*/ } - // MakeInterface tests - func mi0[T MyInterface](x T) { v := MyInterface(x); print(x, v) /*@ types(T, "d.MyInterface")*/ } + // MakeInterface tests + func mi0[T MyInterface](x T) { v := MyInterface(x); print(x, v) /*@ types(T, "d.MyInterface")*/ } - // NewConst tests - func nc0[T any]() { v := (*T)(nil); print(v) /*@ types("*T")*/} + // NewConst tests + func nc0[T any]() { v := (*T)(nil); print(v) /*@ types("*T")*/} - // SliceToArrayPointer - func sl0[T *[4]int | 
*[2]int](x []int) { v := T(x); print(x, v) /*@ types("[]int", T)*/ } - func sl1[T *[4]int | *[2]int, S []int](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } - `, + // SliceToArrayPointer + func sl0[T *[4]int | *[2]int](x []int) { v := T(x); print(x, v) /*@ types("[]int", T)*/ } + func sl1[T *[4]int | *[2]int, S []int](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } + `, ` - package e + package e - func c[T interface{ foo() string }](x T) { - print(x, x.foo, x.foo()) /*@ types(T, "func() string", string)*/ - } - `, + func c[T interface{ foo() string }](x T) { + print(x, x.foo, x.foo()) /*@ types(T, "func() string", string)*/ + } + `, `package f - func eq[T comparable](t T, i interface{}) bool { - return t == i - } - `, + func eq[T comparable](t T, i interface{}) bool { + return t == i + } + `, // TODO(59983): investigate why writing g.c panics in (*FieldAddr).String. `package g - type S struct{ f int } - func c[P *S]() []P { return []P{{f: 1}} } - `, + type S struct{ f int } + func c[P *S]() []P { return []P{{f: 1}} } + `, `package h - func sign[bytes []byte | string](s bytes) (bool, bool) { - neg := false - if len(s) > 0 && (s[0] == '-' || s[0] == '+') { - neg = s[0] == '-' - s = s[1:] - } - return !neg, len(s) > 0 - }`, + func sign[bytes []byte | string](s bytes) (bool, bool) { + neg := false + if len(s) > 0 && (s[0] == '-' || s[0] == '+') { + neg = s[0] == '-' + s = s[1:] + } + return !neg, len(s) > 0 + }`, `package i - func digits[bytes []byte | string](s bytes) bool { - for _, c := range []byte(s) { - if c < '0' || '9' < c { - return false - } + func digits[bytes []byte | string](s bytes) bool { + for _, c := range []byte(s) { + if c < '0' || '9' < c { + return false } - return true - }`, + } + return true + }`, ` - package j + package j - type E interface{} + type E interface{} - func Foo[T E, PT interface{ *T }]() T { - pt := PT(new(T)) - x := *pt - print(x) /*@ types(T)*/ - return x - } - `, + func Foo[T E, PT interface{ *T }]() T { + pt := PT(new(T)) + x := *pt + print(x) /*@ types(T)*/ + return x + } + `, ` - package k + package k - func f[M any, PM *M](p PM) { - var m M - *p = m - print(m) /*@ types(M)*/ - print(p) /*@ types(PM)*/ - } - `, + func f[M any, PM *M](p PM) { + var m M + *p = m + print(m) /*@ types(M)*/ + print(p) /*@ types(PM)*/ + } + `, ` - package l - - type A struct{int} - func (*A) Marker() {} - - type B struct{string} - func (*B) Marker() {} - - type C struct{float32} - func (*C) Marker() {} - - func process[T interface { - *A - *B - *C - Marker() - }](v T) { - v.Marker() - a := *(any(v).(*A)); print(a) /*@ types("l.A")*/ - b := *(any(v).(*B)); print(b) /*@ types("l.B")*/ - c := *(any(v).(*C)); print(c) /*@ types("l.C")*/ - }`, + package l + + type A struct{int} + func (*A) Marker() {} + + type B struct{string} + func (*B) Marker() {} + + type C struct{float32} + func (*C) Marker() {} + + func process[T interface { + *A + *B + *C + Marker() + }](v T) { + v.Marker() + a := *(any(v).(*A)); print(a) /*@ types("l.A")*/ + b := *(any(v).(*B)); print(b) /*@ types("l.B")*/ + c := *(any(v).(*C)); print(c) /*@ types("l.C")*/ + }`, } { contents := contents pkgname := packageName(t, contents) From 005fa64ffe3ab3c3004cf69596467d7b34ec6d8e Mon Sep 17 00:00:00 2001 From: Jonathan Amsterdam Date: Tue, 9 May 2023 21:04:27 -0400 Subject: [PATCH 006/109] go/analysis/passes/slog: add check for slog.Group slog.Group now takes a ...any, so add it to the list of functions to check. 
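
For example, a Group call whose variadic arguments cannot be paired up as
string (or slog.Attr) keys and values is now reported. A minimal sketch of
the pattern the new test data below exercises (illustrative only; it assumes
the standard log/slog import, and which argument is flagged follows the
checker's key/value pairing):

	package p

	import "log/slog"

	func f() {
		// These arguments cannot be interpreted as alternating keys
		// (string or slog.Attr) and values, so the checker reports
		// the argument found in a key position that is neither.
		_ = slog.Group("a", 1, 2, 3)
	}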
Change-Id: I73025158a6cf55c7a6689a36f3c2c58df6d0db3f Reviewed-on: https://go-review.googlesource.com/c/tools/+/494098 Run-TryBot: Jonathan Amsterdam gopls-CI: kokoro Reviewed-by: Tim King TryBot-Result: Gopher Robot --- go/analysis/passes/slog/slog.go | 7 +++---- go/analysis/passes/slog/testdata/src/a/a.go | 2 ++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/go/analysis/passes/slog/slog.go b/go/analysis/passes/slog/slog.go index 303ff85baf9..cf9b77873c7 100644 --- a/go/analysis/passes/slog/slog.go +++ b/go/analysis/passes/slog/slog.go @@ -174,10 +174,9 @@ func kvFuncSkipArgs(fn *types.Func) (int, bool) { } recv := fn.Type().(*types.Signature).Recv() if recv == nil { - // TODO: If #59204 is accepted, uncomment the lines below. - // if fn.Name() == "Group" { - // return 0, true - // } + if fn.Name() == "Group" { + return 0, true + } skip, ok := slogOutputFuncs[fn.Name()] return skip, ok } diff --git a/go/analysis/passes/slog/testdata/src/a/a.go b/go/analysis/passes/slog/testdata/src/a/a.go index 56425a9247d..a13aac773d0 100644 --- a/go/analysis/passes/slog/testdata/src/a/a.go +++ b/go/analysis/passes/slog/testdata/src/a/a.go @@ -143,6 +143,8 @@ func All() { r.Add(1, 2) // want `slog.Record.Add arg "1" should be a string or a slog.Attr` + _ = slog.Group("a", 1, 2, 3) // want `slog.Group arg "2" should be a string or a slog.Attr` + } // Used in tests by package b. From d668f586c32d3403acb49125a070542d6c3e32ff Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Wed, 10 May 2023 11:26:34 -0400 Subject: [PATCH 007/109] gopls/internal/regtest/marker: require cgo for issue59944.txt This test is failing on the nocgo builders. Change-Id: I0f3577eee84d29154c9f0fab833b10502f049861 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494176 Reviewed-by: Alan Donovan Run-TryBot: Robert Findley Auto-Submit: Robert Findley TryBot-Result: Gopher Robot --- gopls/internal/lsp/regtest/marker.go | 10 ++++++++-- .../regtest/marker/testdata/fixedbugs/issue59944.txt | 3 +++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/gopls/internal/lsp/regtest/marker.go b/gopls/internal/lsp/regtest/marker.go index cea54b1de72..6b1f4abf0f1 100644 --- a/gopls/internal/lsp/regtest/marker.go +++ b/gopls/internal/lsp/regtest/marker.go @@ -100,8 +100,9 @@ var update = flag.Bool("update", false, "if set, update test data during marker // There are three types of file within the test archive that are given special // treatment by the test runner: // - "flags": this file is treated as a whitespace-separated list of flags -// that configure the MarkerTest instance. For example, -min_go=go1.18 sets -// the minimum required Go version for the test. +// that configure the MarkerTest instance. Supported flags: +// -min_go=go1.18 sets the minimum Go version for the test; +// -cgo requires that CGO_ENABLED is set and the cgo tool is available // TODO(rfindley): support flag values containing whitespace. // - "settings.json": this file is parsed as JSON, and used as the // session configuration (see gopls/doc/settings.md) @@ -340,6 +341,9 @@ func RunMarkerTests(t *testing.T, dir string) { } testenv.NeedsGo1Point(t, go1point) } + if test.cgo { + testenv.NeedsTool(t, "cgo") + } config := fake.EditorConfig{ Settings: test.settings, Env: test.env, @@ -553,6 +557,7 @@ type markerTest struct { flags []string // Parsed flags values. 
minGoVersion string + cgo bool } // flagSet returns the flagset used for parsing the special "flags" file in the @@ -560,6 +565,7 @@ type markerTest struct { func (t *markerTest) flagSet() *flag.FlagSet { flags := flag.NewFlagSet(t.name, flag.ContinueOnError) flags.StringVar(&t.minGoVersion, "min_go", "", "if set, the minimum go1.X version required for this test") + flags.BoolVar(&t.cgo, "cgo", false, "if set, requires cgo (both the cgo tool and CGO_ENABLED=1)") return flags } diff --git a/gopls/internal/regtest/marker/testdata/fixedbugs/issue59944.txt b/gopls/internal/regtest/marker/testdata/fixedbugs/issue59944.txt index 118c2df5772..9e39d8f5fe9 100644 --- a/gopls/internal/regtest/marker/testdata/fixedbugs/issue59944.txt +++ b/gopls/internal/regtest/marker/testdata/fixedbugs/issue59944.txt @@ -4,6 +4,9 @@ the methodset of its receiver type. Adapted from the code in question from the issue. +-- flags -- +-cgo + -- go.mod -- module example.com From a7e7dc44c91eda20c73522ef345d9b64f973f988 Mon Sep 17 00:00:00 2001 From: Tim King Date: Fri, 5 May 2023 15:27:44 -0700 Subject: [PATCH 008/109] go/ssa: rename test cases in TestGenericBodies Previous test cases were an alphabetical series starting at p and wrapping around to l. These are now numbered from p00 to p23. Change-Id: I4ce4f4f9e591cdcff2e67e26103654bb04c5d932 Reviewed-on: https://go-review.googlesource.com/c/tools/+/493058 Reviewed-by: Alan Donovan Run-TryBot: Tim King TryBot-Result: Gopher Robot gopls-CI: kokoro --- go/ssa/builder_generic_test.go | 97 +++++++++++++++++++--------------- 1 file changed, 53 insertions(+), 44 deletions(-) diff --git a/go/ssa/builder_generic_test.go b/go/ssa/builder_generic_test.go index 272a9587f30..c5543e37779 100644 --- a/go/ssa/builder_generic_test.go +++ b/go/ssa/builder_generic_test.go @@ -36,7 +36,7 @@ func TestGenericBodies(t *testing.T) { } for _, contents := range []string{ ` - package p + package p00 func f(x int) { var i interface{} @@ -46,14 +46,14 @@ func TestGenericBodies(t *testing.T) { } `, ` - package q + package p01 func f[T any](x T) { print(x) //@ types(T) } `, ` - package r + package p02 func f[T ~int]() { var x T @@ -61,7 +61,7 @@ func TestGenericBodies(t *testing.T) { } `, ` - package s + package p03 func a[T ~[4]byte](x T) { for k, v := range x { @@ -96,26 +96,26 @@ func TestGenericBodies(t *testing.T) { func From() { type A [4]byte - print(a[A]) //@ types("func(x s.A)") + print(a[A]) //@ types("func(x p03.A)") type B *[4]byte - print(b[B]) //@ types("func(x s.B)") + print(b[B]) //@ types("func(x p03.B)") type C []byte - print(c[C]) //@ types("func(x s.C)") + print(c[C]) //@ types("func(x p03.C)") type D string - print(d[D]) //@ types("func(x s.D)") + print(d[D]) //@ types("func(x p03.D)") type E map[int]string - print(e[E]) //@ types("func(x s.E)") + print(e[E]) //@ types("func(x p03.E)") type F chan string - print(f[F]) //@ types("func(x s.F)") + print(f[F]) //@ types("func(x p03.F)") } `, ` - package t + package p05 func f[S any, T ~chan S](x T) { for v := range x { @@ -125,11 +125,11 @@ func TestGenericBodies(t *testing.T) { func From() { type F chan string - print(f[string, F]) //@ types("func(x t.F)") + print(f[string, F]) //@ types("func(x p05.F)") } `, ` - package u + package p06 func fibonacci[T ~chan int](c, quit T) { x, y := 0, 1 @@ -155,12 +155,12 @@ func TestGenericBodies(t *testing.T) { type F chan int c := make(F) quit := make(F) - print(start[F], c, quit) //@ types("func(c u.F, quit u.F)", "u.F", "u.F") - print(fibonacci[F], c, quit) //@ types("func(c u.F, quit 
u.F)", "u.F", "u.F") + print(start[F], c, quit) //@ types("func(c p06.F, quit p06.F)", "p06.F", "p06.F") + print(fibonacci[F], c, quit) //@ types("func(c p06.F, quit p06.F)", "p06.F", "p06.F") } `, ` - package v + package p07 func f[T ~struct{ x int; y string }](i int) T { u := []T{ T{0, "lorem"}, T{1, "ipsum"}} @@ -168,11 +168,11 @@ func TestGenericBodies(t *testing.T) { } func From() { type S struct{ x int; y string } - print(f[S]) //@ types("func(i int) v.S") + print(f[S]) //@ types("func(i int) p07.S") } `, ` - package w + package p08 func f[T ~[4]int8](x T, l, h int) []int8 { return x[l:h] @@ -189,11 +189,11 @@ func TestGenericBodies(t *testing.T) { type H []int32 print(f[F](F{}, 0, 0)) //@ types("[]int8") print(g[G](nil, 0, 0)) //@ types("[]int16") - print(h[H](nil, 0, 0)) //@ types("w.H") + print(h[H](nil, 0, 0)) //@ types("p08.H") } `, ` - package x + package p09 func h[E any, T ~[]E](x T, l, h int) []E { s := x[l:h] @@ -206,7 +206,7 @@ func TestGenericBodies(t *testing.T) { } `, ` - package y + package p10 // Test "make" builtin with different forms on core types and // when capacities are constants or variable. @@ -236,7 +236,7 @@ func TestGenericBodies(t *testing.T) { } `, ` - package z + package p11 func h[T ~[4]int](x T) { print(len(x), cap(x)) //@ types(int, int) @@ -253,7 +253,7 @@ func TestGenericBodies(t *testing.T) { } `, ` - package a + package p12 func f[E any, F ~func() E](x F) { print(x, x()) //@ types(F, E) @@ -265,7 +265,7 @@ func TestGenericBodies(t *testing.T) { } `, ` - package b + package p13 func f[E any, M ~map[string]E](m M) { y, ok := m["lorem"] @@ -277,7 +277,7 @@ func TestGenericBodies(t *testing.T) { } `, ` - package c + package p14 func a[T interface{ []int64 | [5]int64 }](x T) int64 { print(x, x[2], x[3]) //@ types(T, int64, int64) @@ -318,25 +318,25 @@ func TestGenericBodies(t *testing.T) { } `, ` - package d + package p15 type MyInt int type Other int type MyInterface interface{ foo() } // ChangeType tests - func ct0(x int) { v := MyInt(x); print(x, v) /*@ types(int, "d.MyInt")*/ } + func ct0(x int) { v := MyInt(x); print(x, v) /*@ types(int, "p15.MyInt")*/ } func ct1[T MyInt | Other, S int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } func ct2[T int, S MyInt | int ](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } func ct3[T MyInt | Other, S MyInt | int ](x S) { v := T(x) ; print(x, v) /*@ types(S, T)*/ } // Convert tests - func co0[T int | int8](x MyInt) { v := T(x); print(x, v) /*@ types("d.MyInt", T)*/} - func co1[T int | int8](x T) { v := MyInt(x); print(x, v) /*@ types(T, "d.MyInt")*/ } + func co0[T int | int8](x MyInt) { v := T(x); print(x, v) /*@ types("p15.MyInt", T)*/} + func co1[T int | int8](x T) { v := MyInt(x); print(x, v) /*@ types(T, "p15.MyInt")*/ } func co2[S, T int | int8](x T) { v := S(x); print(x, v) /*@ types(T, S)*/ } // MakeInterface tests - func mi0[T MyInterface](x T) { v := MyInterface(x); print(x, v) /*@ types(T, "d.MyInterface")*/ } + func mi0[T MyInterface](x T) { v := MyInterface(x); print(x, v) /*@ types(T, "p15.MyInterface")*/ } // NewConst tests func nc0[T any]() { v := (*T)(nil); print(v) /*@ types("*T")*/} @@ -346,24 +346,29 @@ func TestGenericBodies(t *testing.T) { func sl1[T *[4]int | *[2]int, S []int](x S) { v := T(x); print(x, v) /*@ types(S, T)*/ } `, ` - package e + package p16 func c[T interface{ foo() string }](x T) { print(x, x.foo, x.foo()) /*@ types(T, "func() string", string)*/ } `, - `package f + ` + package p17 func eq[T comparable](t T, i interface{}) bool { return t == i } `, // TODO(59983): 
investigate why writing g.c panics in (*FieldAddr).String. - `package g + ` + package p18 + type S struct{ f int } func c[P *S]() []P { return []P{{f: 1}} } `, - `package h + ` + package p19 + func sign[bytes []byte | string](s bytes) (bool, bool) { neg := false if len(s) > 0 && (s[0] == '-' || s[0] == '+') { @@ -371,8 +376,10 @@ func TestGenericBodies(t *testing.T) { s = s[1:] } return !neg, len(s) > 0 - }`, - `package i + } + `, + `package p20 + func digits[bytes []byte | string](s bytes) bool { for _, c := range []byte(s) { if c < '0' || '9' < c { @@ -380,9 +387,10 @@ func TestGenericBodies(t *testing.T) { } } return true - }`, + } + `, ` - package j + package p21 type E interface{} @@ -394,7 +402,7 @@ func TestGenericBodies(t *testing.T) { } `, ` - package k + package p22 func f[M any, PM *M](p PM) { var m M @@ -404,7 +412,7 @@ func TestGenericBodies(t *testing.T) { } `, ` - package l + package p23 type A struct{int} func (*A) Marker() {} @@ -422,10 +430,11 @@ func TestGenericBodies(t *testing.T) { Marker() }](v T) { v.Marker() - a := *(any(v).(*A)); print(a) /*@ types("l.A")*/ - b := *(any(v).(*B)); print(b) /*@ types("l.B")*/ - c := *(any(v).(*C)); print(c) /*@ types("l.C")*/ - }`, + a := *(any(v).(*A)); print(a) /*@ types("p23.A")*/ + b := *(any(v).(*B)); print(b) /*@ types("p23.B")*/ + c := *(any(v).(*C)); print(c) /*@ types("p23.C")*/ + } + `, } { contents := contents pkgname := packageName(t, contents) From d799eba7d4921b9d664f467dc661075c6f003a68 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 9 May 2023 18:02:32 -0400 Subject: [PATCH 009/109] gopls: additional instrumentation during goimports Add some visibility into goimports operations, by instrumenting spans in top-level imports and gocommand operations. This may be the first time we instrument non-gopls code in this way, but it should be safe as other build targets (e.g. the goimports or gopackages commands) do not set a global exporter, and therefore the cost of event instrumentation should be minimal. 
For golang/go#59216 Change-Id: Id2f8fe05d6b61e96cdd2d41cc43b3d4c3cf39e21 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494095 Reviewed-by: Alan Donovan Run-TryBot: Robert Findley TryBot-Result: Gopher Robot gopls-CI: kokoro --- gopls/internal/lsp/cache/imports.go | 17 +++++++++++++---- gopls/internal/lsp/cache/view.go | 2 +- gopls/internal/lsp/debug/trace.go | 2 +- .../lsp/source/completion/completion.go | 12 ++++++------ gopls/internal/lsp/source/format.go | 8 ++++---- gopls/internal/lsp/source/known_packages.go | 2 +- gopls/internal/lsp/source/view.go | 2 +- internal/gocommand/invoke.go | 18 ++++++++++++++++++ internal/imports/fix.go | 12 ++++++++---- internal/imports/imports.go | 9 +++++++-- internal/imports/mod.go | 4 ++++ 11 files changed, 64 insertions(+), 24 deletions(-) diff --git a/gopls/internal/lsp/cache/imports.go b/gopls/internal/lsp/cache/imports.go index 4c38689793a..55085a2a1e0 100644 --- a/gopls/internal/lsp/cache/imports.go +++ b/gopls/internal/lsp/cache/imports.go @@ -31,7 +31,10 @@ type importsState struct { cachedDirectoryFilters []string } -func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot, fn func(*imports.Options) error) error { +func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot, fn func(context.Context, *imports.Options) error) error { + ctx, done := event.Start(ctx, "cache.importsState.runProcessEnvFunc") + defer done() + s.mu.Lock() defer s.mu.Unlock() @@ -93,7 +96,7 @@ func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot LocalPrefix: localPrefix, } - if err := fn(opts); err != nil { + if err := fn(ctx, opts); err != nil { return err } @@ -114,6 +117,9 @@ func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot // the view's process environment. Assumes that the caller is holding the // importsState mutex. 
func populateProcessEnvFromSnapshot(ctx context.Context, pe *imports.ProcessEnv, snapshot *snapshot) error { + ctx, done := event.Start(ctx, "cache.populateProcessEnvFromSnapshot") + defer done() + if snapshot.view.Options().VerboseOutput { pe.Logf = func(format string, args ...interface{}) { event.Log(ctx, fmt.Sprintf(format, args...)) @@ -153,6 +159,9 @@ func populateProcessEnvFromSnapshot(ctx context.Context, pe *imports.ProcessEnv, } func (s *importsState) refreshProcessEnv() { + ctx, done := event.Start(s.ctx, "cache.importsState.refreshProcessEnv") + defer done() + start := time.Now() s.mu.Lock() @@ -164,9 +173,9 @@ func (s *importsState) refreshProcessEnv() { event.Log(s.ctx, "background imports cache refresh starting") if err := imports.PrimeCache(context.Background(), env); err == nil { - event.Log(s.ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start))) + event.Log(ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start))) } else { - event.Log(s.ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start)), keys.Err.Of(err)) + event.Log(ctx, fmt.Sprintf("background refresh finished after %v", time.Since(start)), keys.Err.Of(err)) } s.mu.Lock() s.cacheRefreshDuration = time.Since(start) diff --git a/gopls/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go index bd91b6b271a..644a43d505a 100644 --- a/gopls/internal/lsp/cache/view.go +++ b/gopls/internal/lsp/cache/view.go @@ -436,7 +436,7 @@ func viewEnv(v *View) string { return buf.String() } -func (s *snapshot) RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error { +func (s *snapshot) RunProcessEnvFunc(ctx context.Context, fn func(context.Context, *imports.Options) error) error { return s.view.importsState.runProcessEnvFunc(ctx, s, fn) } diff --git a/gopls/internal/lsp/debug/trace.go b/gopls/internal/lsp/debug/trace.go index 80cb3dc8dd5..48bed9d3b0f 100644 --- a/gopls/internal/lsp/debug/trace.go +++ b/gopls/internal/lsp/debug/trace.go @@ -35,7 +35,7 @@ var TraceTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`

Recent spans (oldest first)

- A finite number of recent span start/end times are shown below. + A finite number of recent span start/end times are shown below. The nesting represents the children of a parent span (and the log events within a span). A span may appear twice: chronologically at toplevel, and nested within its parent.

diff --git a/gopls/internal/lsp/source/completion/completion.go b/gopls/internal/lsp/source/completion/completion.go index f92790f2753..ad5ce16b372 100644 --- a/gopls/internal/lsp/source/completion/completion.go +++ b/gopls/internal/lsp/source/completion/completion.go @@ -200,7 +200,7 @@ type completer struct { // completionCallbacks is a list of callbacks to collect completions that // require expensive operations. This includes operations where we search // through the entire module cache. - completionCallbacks []func(opts *imports.Options) error + completionCallbacks []func(context.Context, *imports.Options) error // surrounding describes the identifier surrounding the position. surrounding *Selection @@ -887,7 +887,7 @@ func (c *completer) populateImportCompletions(ctx context.Context, searchImport }) } - c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error { + c.completionCallbacks = append(c.completionCallbacks, func(ctx context.Context, opts *imports.Options) error { return imports.GetImportPaths(ctx, searchImports, prefix, c.filename, c.pkg.GetTypes().Name(), opts.Env) }) return nil @@ -1195,7 +1195,7 @@ func (c *completer) selector(ctx context.Context, sel *ast.SelectorExpr) error { // Rank import paths as goimports would. var relevances map[string]float64 if len(paths) > 0 { - if err := c.snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error { + if err := c.snapshot.RunProcessEnvFunc(ctx, func(ctx context.Context, opts *imports.Options) error { var err error relevances, err = imports.ScoreImportPaths(ctx, opts.Env, paths) return err @@ -1342,7 +1342,7 @@ func (c *completer) selector(ctx context.Context, sel *ast.SelectorExpr) error { } } - c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error { + c.completionCallbacks = append(c.completionCallbacks, func(ctx context.Context, opts *imports.Options) error { defer cancel() return imports.GetPackageExports(ctx, add, id.Name, c.filename, c.pkg.GetTypes().Name(), opts.Env) }) @@ -1635,7 +1635,7 @@ func (c *completer) unimportedPackages(ctx context.Context, seen map[string]stru // Rank candidates using goimports' algorithm. 
var relevances map[string]float64 if len(paths) != 0 { - if err := c.snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error { + if err := c.snapshot.RunProcessEnvFunc(ctx, func(ctx context.Context, opts *imports.Options) error { var err error relevances, err = imports.ScoreImportPaths(ctx, opts.Env, paths) return err @@ -1708,7 +1708,7 @@ func (c *completer) unimportedPackages(ctx context.Context, seen map[string]stru }) count++ } - c.completionCallbacks = append(c.completionCallbacks, func(opts *imports.Options) error { + c.completionCallbacks = append(c.completionCallbacks, func(ctx context.Context, opts *imports.Options) error { defer cancel() return imports.GetAllCandidates(ctx, add, prefix, c.filename, c.pkg.GetTypes().Name(), opts.Env) }) diff --git a/gopls/internal/lsp/source/format.go b/gopls/internal/lsp/source/format.go index ac73c76e5a7..dfc4f7664d5 100644 --- a/gopls/internal/lsp/source/format.go +++ b/gopls/internal/lsp/source/format.go @@ -116,8 +116,8 @@ func AllImportsFixes(ctx context.Context, snapshot Snapshot, fh FileHandle) (all if err != nil { return nil, nil, err } - if err := snapshot.RunProcessEnvFunc(ctx, func(opts *imports.Options) error { - allFixEdits, editsPerFix, err = computeImportEdits(snapshot, pgf, opts) + if err := snapshot.RunProcessEnvFunc(ctx, func(ctx context.Context, opts *imports.Options) error { + allFixEdits, editsPerFix, err = computeImportEdits(ctx, snapshot, pgf, opts) return err }); err != nil { return nil, nil, fmt.Errorf("AllImportsFixes: %v", err) @@ -127,11 +127,11 @@ func AllImportsFixes(ctx context.Context, snapshot Snapshot, fh FileHandle) (all // computeImportEdits computes a set of edits that perform one or all of the // necessary import fixes. -func computeImportEdits(snapshot Snapshot, pgf *ParsedGoFile, options *imports.Options) (allFixEdits []protocol.TextEdit, editsPerFix []*ImportFix, err error) { +func computeImportEdits(ctx context.Context, snapshot Snapshot, pgf *ParsedGoFile, options *imports.Options) (allFixEdits []protocol.TextEdit, editsPerFix []*ImportFix, err error) { filename := pgf.URI.Filename() // Build up basic information about the original file. - allFixes, err := imports.FixImports(filename, pgf.Src, options) + allFixes, err := imports.FixImports(ctx, filename, pgf.Src, options) if err != nil { return nil, nil, err } diff --git a/gopls/internal/lsp/source/known_packages.go b/gopls/internal/lsp/source/known_packages.go index b1c90a9240f..11134037f14 100644 --- a/gopls/internal/lsp/source/known_packages.go +++ b/gopls/internal/lsp/source/known_packages.go @@ -85,7 +85,7 @@ func KnownPackagePaths(ctx context.Context, snapshot Snapshot, fh FileHandle) ([ } // Augment the set by invoking the goimports algorithm. - if err := snapshot.RunProcessEnvFunc(ctx, func(o *imports.Options) error { + if err := snapshot.RunProcessEnvFunc(ctx, func(ctx context.Context, o *imports.Options) error { ctx, cancel := context.WithTimeout(ctx, time.Millisecond*80) defer cancel() var seenMu sync.Mutex diff --git a/gopls/internal/lsp/source/view.go b/gopls/internal/lsp/source/view.go index 90288cd2847..b2a2ebda9d5 100644 --- a/gopls/internal/lsp/source/view.go +++ b/gopls/internal/lsp/source/view.go @@ -117,7 +117,7 @@ type Snapshot interface { // RunProcessEnvFunc runs fn with the process env for this snapshot's view. // Note: the process env contains cached module and filesystem state. 
- RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error + RunProcessEnvFunc(ctx context.Context, fn func(context.Context, *imports.Options) error) error // ModFiles are the go.mod files enclosed in the snapshot's view and known // to the snapshot. diff --git a/internal/gocommand/invoke.go b/internal/gocommand/invoke.go index 3c0afe723bf..8d9fc98d8f5 100644 --- a/internal/gocommand/invoke.go +++ b/internal/gocommand/invoke.go @@ -24,6 +24,9 @@ import ( exec "golang.org/x/sys/execabs" "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" + "golang.org/x/tools/internal/event/tag" ) // An Runner will run go command invocations and serialize @@ -53,9 +56,19 @@ func (runner *Runner) initialize() { // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) +// verb is an event label for the go command verb. +var verb = keys.NewString("verb", "go command verb") + +func invLabels(inv Invocation) []label.Label { + return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)} +} + // Run is a convenience wrapper around RunRaw. // It returns only stdout and a "friendly" error. func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) + defer done() + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) return stdout, friendly } @@ -63,6 +76,9 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e // RunPiped runs the invocation serially, always waiting for any concurrent // invocations to complete first. func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) + defer done() + _, err := runner.runPiped(ctx, inv, stdout, stderr) return err } @@ -70,6 +86,8 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde // RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() // Make sure the runner is always initialized. runner.initialize() diff --git a/internal/imports/fix.go b/internal/imports/fix.go index 6b4935257a5..d4f1b4e8a0f 100644 --- a/internal/imports/fix.go +++ b/internal/imports/fix.go @@ -26,6 +26,7 @@ import ( "unicode/utf8" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) @@ -543,7 +544,7 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { var fixImports = fixImportsDefault func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { - fixes, err := getFixes(fset, f, filename, env) + fixes, err := getFixes(context.Background(), fset, f, filename, env) if err != nil { return err } @@ -553,7 +554,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P // getFixes gets the import fixes that need to be made to f in order to fix the imports. // It does not modify the ast. 
-func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { +func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { abs, err := filepath.Abs(filename) if err != nil { return nil, err @@ -607,7 +608,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv // Go look for candidates in $GOPATH, etc. We don't necessarily load // the real exports of sibling imports, so keep assuming their contents. - if err := addExternalCandidates(p, p.missingRefs, filename); err != nil { + if err := addExternalCandidates(ctx, p, p.missingRefs, filename); err != nil { return nil, err } @@ -1055,7 +1056,10 @@ type scanCallback struct { exportsLoaded func(pkg *pkg, exports []string) } -func addExternalCandidates(pass *pass, refs references, filename string) error { +func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { + ctx, done := event.Start(ctx, "imports.addExternalCandidates") + defer done() + var mu sync.Mutex found := make(map[string][]pkgDistance) callback := &scanCallback{ diff --git a/internal/imports/imports.go b/internal/imports/imports.go index 95a88383a79..58e637b90f2 100644 --- a/internal/imports/imports.go +++ b/internal/imports/imports.go @@ -11,6 +11,7 @@ package imports import ( "bufio" "bytes" + "context" "fmt" "go/ast" "go/format" @@ -23,6 +24,7 @@ import ( "strings" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" ) // Options is golang.org/x/tools/imports.Options with extra internal-only options. @@ -66,14 +68,17 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. -func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { +func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { + ctx, done := event.Start(ctx, "imports.FixImports") + defer done() + fileSet := token.NewFileSet() file, _, err := parse(fileSet, filename, src, opt) if err != nil { return nil, err } - return getFixes(fileSet, file, filename, opt.Env) + return getFixes(ctx, fileSet, file, filename, opt.Env) } // ApplyFixes applies all of the fixes to the file and formats it. extraMode diff --git a/internal/imports/mod.go b/internal/imports/mod.go index 7d99d04ca8a..1389d38b213 100644 --- a/internal/imports/mod.go +++ b/internal/imports/mod.go @@ -19,6 +19,7 @@ import ( "strings" "golang.org/x/mod/module" + "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) @@ -424,6 +425,9 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) ( } func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { + ctx, done := event.Start(ctx, "imports.ModuleResolver.scan") + defer done() + if err := r.init(); err != nil { return err } From 4ed7de18ddf54e1f77414d03a560677248d4cb58 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 9 May 2023 18:22:43 -0400 Subject: [PATCH 010/109] gopls: add google-cloud-go as a benchmark repo This repo is large, and exercises use-cases with many workspace modules. 
For golang/go#60089 Change-Id: I93b9c99ce52bd7ae25f65fdb9bf0568c25375411 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494096 Reviewed-by: Alan Donovan gopls-CI: kokoro Run-TryBot: Robert Findley TryBot-Result: Gopher Robot --- gopls/internal/regtest/bench/definition_test.go | 1 + gopls/internal/regtest/bench/didchange_test.go | 1 + gopls/internal/regtest/bench/hover_test.go | 1 + gopls/internal/regtest/bench/implementations_test.go | 1 + gopls/internal/regtest/bench/iwl_test.go | 7 ++++--- gopls/internal/regtest/bench/references_test.go | 1 + gopls/internal/regtest/bench/rename_test.go | 3 ++- gopls/internal/regtest/bench/repo_test.go | 8 ++++++++ 8 files changed, 19 insertions(+), 4 deletions(-) diff --git a/gopls/internal/regtest/bench/definition_test.go b/gopls/internal/regtest/bench/definition_test.go index a3e68f5327a..f73bcb040f4 100644 --- a/gopls/internal/regtest/bench/definition_test.go +++ b/gopls/internal/regtest/bench/definition_test.go @@ -15,6 +15,7 @@ func BenchmarkDefinition(b *testing.B) { regexp string }{ {"istio", "pkg/config/model.go", `gogotypes\.(MarshalAny)`}, + {"google-cloud-go", "httpreplay/httpreplay.go", `proxy\.(ForRecording)`}, {"kubernetes", "pkg/controller/lookup_cache.go", `hashutil\.(DeepHashObject)`}, {"kuma", "api/generic/insights.go", `proto\.(Message)`}, {"pkgsite", "internal/log/log.go", `derrors\.(Wrap)`}, diff --git a/gopls/internal/regtest/bench/didchange_test.go b/gopls/internal/regtest/bench/didchange_test.go index da51c089e0e..6bde10e1452 100644 --- a/gopls/internal/regtest/bench/didchange_test.go +++ b/gopls/internal/regtest/bench/didchange_test.go @@ -23,6 +23,7 @@ var didChangeTests = []struct { repo string file string }{ + {"google-cloud-go", "httpreplay/httpreplay.go"}, {"istio", "pkg/fuzz/util.go"}, {"kubernetes", "pkg/controller/lookup_cache.go"}, {"kuma", "api/generic/insights.go"}, diff --git a/gopls/internal/regtest/bench/hover_test.go b/gopls/internal/regtest/bench/hover_test.go index e89e03b332a..afc1b3c7b4a 100644 --- a/gopls/internal/regtest/bench/hover_test.go +++ b/gopls/internal/regtest/bench/hover_test.go @@ -14,6 +14,7 @@ func BenchmarkHover(b *testing.B) { file string regexp string }{ + {"google-cloud-go", "httpreplay/httpreplay.go", `proxy\.(ForRecording)`}, {"istio", "pkg/config/model.go", `gogotypes\.(MarshalAny)`}, {"kubernetes", "pkg/apis/core/types.go", "type (Pod)"}, {"kuma", "api/generic/insights.go", `proto\.(Message)`}, diff --git a/gopls/internal/regtest/bench/implementations_test.go b/gopls/internal/regtest/bench/implementations_test.go index 219f42a374c..ff64e8ba7dc 100644 --- a/gopls/internal/regtest/bench/implementations_test.go +++ b/gopls/internal/regtest/bench/implementations_test.go @@ -12,6 +12,7 @@ func BenchmarkImplementations(b *testing.B) { file string regexp string }{ + {"google-cloud-go", "httpreplay/httpreplay.go", `type (Recorder)`}, {"istio", "pkg/config/mesh/watcher.go", `type (Watcher)`}, {"kubernetes", "pkg/controller/lookup_cache.go", `objectWithMeta`}, {"kuma", "api/generic/insights.go", `type (Insight)`}, diff --git a/gopls/internal/regtest/bench/iwl_test.go b/gopls/internal/regtest/bench/iwl_test.go index 32bfa9b3c16..c4a2d0f23bf 100644 --- a/gopls/internal/regtest/bench/iwl_test.go +++ b/gopls/internal/regtest/bench/iwl_test.go @@ -20,12 +20,13 @@ func BenchmarkInitialWorkspaceLoad(b *testing.B) { repo string file string }{ - {"tools", "internal/lsp/cache/snapshot.go"}, + {"google-cloud-go", "httpreplay/httpreplay.go"}, + {"istio", "pkg/fuzz/util.go"}, {"kubernetes", 
"pkg/controller/lookup_cache.go"}, + {"kuma", "api/generic/insights.go"}, {"pkgsite", "internal/frontend/server.go"}, {"starlark", "starlark/eval.go"}, - {"istio", "pkg/fuzz/util.go"}, - {"kuma", "api/generic/insights.go"}, + {"tools", "internal/lsp/cache/snapshot.go"}, } for _, test := range tests { diff --git a/gopls/internal/regtest/bench/references_test.go b/gopls/internal/regtest/bench/references_test.go index d47ea56a47e..099d9bd606f 100644 --- a/gopls/internal/regtest/bench/references_test.go +++ b/gopls/internal/regtest/bench/references_test.go @@ -12,6 +12,7 @@ func BenchmarkReferences(b *testing.B) { file string regexp string }{ + {"google-cloud-go", "httpreplay/httpreplay.go", `func (NewRecorder)`}, {"istio", "pkg/config/model.go", "type (Meta)"}, {"kubernetes", "pkg/controller/lookup_cache.go", "type (objectWithMeta)"}, {"kuma", "pkg/events/interfaces.go", "type (Event)"}, diff --git a/gopls/internal/regtest/bench/rename_test.go b/gopls/internal/regtest/bench/rename_test.go index bd1ce94910c..ebb3482a1cf 100644 --- a/gopls/internal/regtest/bench/rename_test.go +++ b/gopls/internal/regtest/bench/rename_test.go @@ -16,9 +16,10 @@ func BenchmarkRename(b *testing.B) { regexp string baseName string }{ + {"google-cloud-go", "httpreplay/httpreplay.go", `func (NewRecorder)`, "NewRecorder"}, + {"istio", "pkg/config/model.go", `(Namespace) string`, "Namespace"}, {"kubernetes", "pkg/controller/lookup_cache.go", `hashutil\.(DeepHashObject)`, "DeepHashObject"}, {"kuma", "pkg/events/interfaces.go", `Delete`, "Delete"}, - {"istio", "pkg/config/model.go", `(Namespace) string`, "Namespace"}, {"pkgsite", "internal/log/log.go", `func (Infof)`, "Infof"}, {"starlark", "starlark/eval.go", `Program\) (Filename)`, "Filename"}, {"tools", "internal/lsp/cache/snapshot.go", `meta \*(metadataGraph)`, "metadataGraph"}, diff --git a/gopls/internal/regtest/bench/repo_test.go b/gopls/internal/regtest/bench/repo_test.go index 7b238f5b604..0b92b124642 100644 --- a/gopls/internal/regtest/bench/repo_test.go +++ b/gopls/internal/regtest/bench/repo_test.go @@ -26,6 +26,14 @@ import ( // These repos were selected to represent a variety of different types of // codebases. var repos = map[string]*repo{ + // google-cloud-go has 145 workspace modules (!), and is quite large. + "google-cloud-go": { + name: "google-cloud-go", + url: "https://github.com/googleapis/google-cloud-go.git", + commit: "07da765765218debf83148cc7ed8a36d6e8921d5", + inDir: flag.String("cloud_go_dir", "", "if set, reuse this directory as google-cloud-go@07da7657"), + }, + // Used by x/benchmarks; large. "istio": { name: "istio", From 787e7207e090a656da7b860c8119052f2929c0bb Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 9 May 2023 18:57:05 -0400 Subject: [PATCH 011/109] gopls/internal/lsp: optimize checks for ignored files Optimize checking for ignored files to avoid unnecessary checks, and only build prefixes once. Along the way, fix a bug where path segments were not handled correctly in the ignore check. Encapsulate the check to make this easy to test. As a result, the DiagnoseChange/google-cloud-go benchmark improved ~5x from ~1.5s to 300ms. Also remove span.Dir, which tended to lead to unnecessary filepath->span->filepath conversions. Inline it in the one place where it was correct. 
For golang/go#60089 Change-Id: Id24d05b504b43e6a6d9b77b5b578583e1351de31 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494097 Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Robert Findley gopls-CI: kokoro --- gopls/internal/lsp/cache/snapshot.go | 7 ++- gopls/internal/lsp/cache/view.go | 63 +++++++++++++++++++++------ gopls/internal/lsp/cache/view_test.go | 29 ++++++++++++ gopls/internal/lsp/diagnostics.go | 4 +- gopls/internal/span/uri.go | 8 ---- 5 files changed, 87 insertions(+), 24 deletions(-) diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index 6bd4be80156..7e9a06f5cea 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -185,6 +185,11 @@ type snapshot struct { // pkgIndex is an index of package IDs, for efficient storage of typerefs. pkgIndex *typerefs.PackageIndex + + // Only compute module prefixes once, as they are used with high frequency to + // detect ignored files. + ignoreFilterOnce sync.Once + ignoreFilter *ignoreFilter } var globalSnapshotID uint64 @@ -1195,7 +1200,7 @@ func (s *snapshot) GoModForFile(uri span.URI) span.URI { func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI { var match span.URI for modURI := range modFiles { - if !source.InDir(span.Dir(modURI).Filename(), uri.Filename()) { + if !source.InDir(filepath.Dir(modURI.Filename()), uri.Filename()) { continue } if len(modURI) > len(match) { diff --git a/gopls/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go index 644a43d505a..ae988368aa0 100644 --- a/gopls/internal/lsp/cache/view.go +++ b/gopls/internal/lsp/cache/view.go @@ -588,22 +588,57 @@ func (v *View) shutdown() { v.snapshotWG.Wait() } +// While go list ./... skips directories starting with '.', '_', or 'testdata', +// gopls may still load them via file queries. Explicitly filter them out. func (s *snapshot) IgnoredFile(uri span.URI) bool { - filename := uri.Filename() - var prefixes []string - if len(s.workspaceModFiles) == 0 { - for _, entry := range filepath.SplitList(s.view.gopath) { - prefixes = append(prefixes, filepath.Join(entry, "src")) + // Fast path: if uri doesn't contain '.', '_', or 'testdata', it is not + // possible that it is ignored. + { + uriStr := string(uri) + if !strings.Contains(uriStr, ".") && !strings.Contains(uriStr, "_") && !strings.Contains(uriStr, "testdata") { + return false } - } else { - prefixes = append(prefixes, s.view.gomodcache) - for m := range s.workspaceModFiles { - prefixes = append(prefixes, span.Dir(m).Filename()) + } + + s.ignoreFilterOnce.Do(func() { + var dirs []string + if len(s.workspaceModFiles) == 0 { + for _, entry := range filepath.SplitList(s.view.gopath) { + dirs = append(dirs, filepath.Join(entry, "src")) + } + } else { + dirs = append(dirs, s.view.gomodcache) + for m := range s.workspaceModFiles { + dirs = append(dirs, filepath.Dir(m.Filename())) + } } + s.ignoreFilter = newIgnoreFilter(dirs) + }) + + return s.ignoreFilter.ignored(uri.Filename()) +} + +// An ignoreFilter implements go list's exclusion rules via its 'ignored' method. +type ignoreFilter struct { + prefixes []string // root dirs, ending in filepath.Separator +} + +// newIgnoreFilter returns a new ignoreFilter implementing exclusion rules +// relative to the provided directories. 
+func newIgnoreFilter(dirs []string) *ignoreFilter { + f := new(ignoreFilter) + for _, d := range dirs { + f.prefixes = append(f.prefixes, filepath.Clean(d)+string(filepath.Separator)) } - for _, prefix := range prefixes { - if strings.HasPrefix(filename, prefix) { - return checkIgnored(filename[len(prefix):]) + return f +} + +func (f *ignoreFilter) ignored(filename string) bool { + for _, prefix := range f.prefixes { + if suffix := strings.TrimPrefix(filename, prefix); suffix != filename { + if checkIgnored(suffix) { + return true + } } } return false @@ -615,6 +650,8 @@ func (s *snapshot) IgnoredFile(uri span.URI) bool { // Directory and file names that begin with "." or "_" are ignored // by the go tool, as are directories named "testdata". func checkIgnored(suffix string) bool { + // Note: this could be further optimized by writing a HasSegment helper, a + // segment-boundary respecting variant of strings.Contains. for _, component := range strings.Split(suffix, string(filepath.Separator)) { if len(component) == 0 { continue @@ -911,7 +948,7 @@ func (v *View) workingDir() span.URI { // TODO(golang/go#57514): eliminate the expandWorkspaceToModule setting // entirely. if v.Options().ExpandWorkspaceToModule && v.gomod != "" { - return span.Dir(v.gomod) + return span.URIFromPath(filepath.Dir(v.gomod.Filename())) } return v.folder } diff --git a/gopls/internal/lsp/cache/view_test.go b/gopls/internal/lsp/cache/view_test.go index 9e6d23bb82b..90471ed4401 100644 --- a/gopls/internal/lsp/cache/view_test.go +++ b/gopls/internal/lsp/cache/view_test.go @@ -276,3 +276,32 @@ func toJSON(x interface{}) string { b, _ := json.MarshalIndent(x, "", " ") return string(b) } + +func TestIgnoreFilter(t *testing.T) { + tests := []struct { + dirs []string + path string + want bool + }{ + {[]string{"a"}, "a/testdata/foo", true}, + {[]string{"a"}, "a/_ignore/foo", true}, + {[]string{"a"}, "a/.ignore/foo", true}, + {[]string{"a"}, "b/testdata/foo", false}, + {[]string{"a"}, "testdata/foo", false}, + {[]string{"a", "b"}, "b/testdata/foo", true}, + {[]string{"a"}, "atestdata/foo", false}, + } + + for _, test := range tests { + // convert to filepaths, for convenience + for i, dir := range test.dirs { + test.dirs[i] = filepath.FromSlash(dir) + } + test.path = filepath.FromSlash(test.path) + + f := newIgnoreFilter(test.dirs) + if got := f.ignored(test.path); got != test.want { + t.Errorf("newIgnoreFilter(%q).ignore(%q) = %t, want %t", test.dirs, test.path, got, test.want) + } + } +} diff --git a/gopls/internal/lsp/diagnostics.go b/gopls/internal/lsp/diagnostics.go index 0ae56c863dd..520549d1aaf 100644 --- a/gopls/internal/lsp/diagnostics.go +++ b/gopls/internal/lsp/diagnostics.go @@ -365,10 +365,10 @@ func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, analyze var hasNonIgnored, hasOpenFile bool for _, uri := range m.CompiledGoFiles { seen[uri] = struct{}{} - if !snapshot.IgnoredFile(uri) { + if !hasNonIgnored && !snapshot.IgnoredFile(uri) { hasNonIgnored = true } - if snapshot.IsOpen(uri) { + if !hasOpenFile && snapshot.IsOpen(uri) { hasOpenFile = true } } diff --git a/gopls/internal/span/uri.go b/gopls/internal/span/uri.go index e6191f7ab12..cf2d66df20b 100644 --- a/gopls/internal/span/uri.go +++ b/gopls/internal/span/uri.go @@ -175,11 +175,3 @@ func isWindowsDriveURIPath(uri string) bool { } return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' } - -// Dir returns the URI for the directory containing uri. Dir panics if uri is -// not a file uri. 
-// -// TODO(rfindley): add a unit test for various edge cases. -func Dir(uri URI) URI { - return URIFromPath(filepath.Dir(uri.Filename())) -} From f44f50e390e21927a72d869c216ad990115e8677 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 10 Apr 2023 18:07:18 -0400 Subject: [PATCH 012/109] gopls/internal/lsp/source: implementation: report builtin 'error' This change causes the implementation query to report the (fake) location in builtin.go of the the built-in error type, if the query type satisfies that interface. Similarly, the error.Error method. Also, a regtest. Fixes golang/go#59527 Change-Id: I61b179c33c5dfa2c5933f6cae79e7245f83292f2 Reviewed-on: https://go-review.googlesource.com/c/tools/+/483535 Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Robert Findley gopls-CI: kokoro --- gopls/internal/lsp/regtest/marker.go | 3 + gopls/internal/lsp/source/implementation.go | 30 +++++++- .../internal/regtest/misc/references_test.go | 77 +++++++++++++++++-- 3 files changed, 101 insertions(+), 9 deletions(-) diff --git a/gopls/internal/lsp/regtest/marker.go b/gopls/internal/lsp/regtest/marker.go index 6b1f4abf0f1..a1d2d6c60ad 100644 --- a/gopls/internal/lsp/regtest/marker.go +++ b/gopls/internal/lsp/regtest/marker.go @@ -289,6 +289,9 @@ var update = flag.Bool("update", false, "if set, update test data during marker // - parallelize/optimize test execution // - reorganize regtest packages (and rename to just 'test'?) // - Rename the files .txtar. +// - Provide some means by which locations in the standard library +// (or builtin.go) can be named, so that, for example, we can we +// can assert that MyError implements the built-in error type. // // Existing marker tests (in ../testdata) to port: // - CallHierarchy diff --git a/gopls/internal/lsp/source/implementation.go b/gopls/internal/lsp/source/implementation.go index 00559cf30d9..25beccf6e1d 100644 --- a/gopls/internal/lsp/source/implementation.go +++ b/gopls/internal/lsp/source/implementation.go @@ -30,7 +30,6 @@ import ( // // TODO(adonovan): // - Audit to ensure robustness in face of type errors. -// - Support 'error' and 'error.Error', which were also lacking from the old implementation. // - Eliminate false positives due to 'tricky' cases of the global algorithm. // - Ensure we have test coverage of: // type aliases @@ -388,9 +387,38 @@ func localImplementations(ctx context.Context, snapshot Snapshot, pkg Package, q locs = append(locs, loc) } + // Special case: for types that satisfy error, report builtin.go (see #59527). + if types.Implements(queryType, errorInterfaceType) { + loc, err := errorLocation(ctx, snapshot) + if err != nil { + return nil, err + } + locs = append(locs, loc) + } + return locs, nil } +var errorInterfaceType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + +// errorLocation returns the location of the 'error' type in builtin.go. +func errorLocation(ctx context.Context, snapshot Snapshot) (protocol.Location, error) { + pgf, err := snapshot.BuiltinFile(ctx) + if err != nil { + return protocol.Location{}, err + } + for _, decl := range pgf.File.Decls { + if decl, ok := decl.(*ast.GenDecl); ok { + for _, spec := range decl.Specs { + if spec, ok := spec.(*ast.TypeSpec); ok && spec.Name.Name == "error" { + return pgf.NodeLocation(spec.Name) + } + } + } + } + return protocol.Location{}, fmt.Errorf("built-in error type not found") +} + // concreteImplementsIntf returns true if a is an interface type implemented by // concrete type b, or vice versa. 
func concreteImplementsIntf(a, b types.Type) bool { diff --git a/gopls/internal/regtest/misc/references_test.go b/gopls/internal/regtest/misc/references_test.go index f0fa379f2b9..ae9a7629528 100644 --- a/gopls/internal/regtest/misc/references_test.go +++ b/gopls/internal/regtest/misc/references_test.go @@ -7,6 +7,8 @@ package misc import ( "fmt" "os" + "path/filepath" + "reflect" "sort" "strings" "testing" @@ -315,11 +317,11 @@ func _() { // - inside the foo.mod/bar [foo.mod/bar.test] test variant package // - from the foo.mod/bar_test [foo.mod/bar.test] x_test package // - from the foo.mod/foo package - {"Blah", []string{"bar/bar.go", "bar/bar_test.go", "bar/bar_x_test.go", "foo/foo.go"}}, + {"Blah", []string{"bar/bar.go:3", "bar/bar_test.go:7", "bar/bar_x_test.go:12", "foo/foo.go:12"}}, // Foo is referenced in bar_x_test.go via the intermediate test variant // foo.mod/foo [foo.mod/bar.test]. - {"Foo", []string{"bar/bar_x_test.go", "foo/foo.go"}}, + {"Foo", []string{"bar/bar_x_test.go:13", "foo/foo.go:5"}}, } for _, test := range refTests { @@ -339,11 +341,11 @@ func _() { // InterfaceM is implemented both in foo.mod/bar [foo.mod/bar.test] (which // doesn't import foo), and in foo.mod/bar_test [foo.mod/bar.test], which // imports the test variant of foo. - {"InterfaceM", []string{"bar/bar_test.go", "bar/bar_x_test.go"}}, + {"InterfaceM", []string{"bar/bar_test.go:3", "bar/bar_x_test.go:8"}}, // A search within the ordinary package to should find implementations // (Fer) within the augmented test package. - {"InterfaceF", []string{"foo/foo_test.go"}}, + {"InterfaceF", []string{"foo/foo_test.go:3"}}, } for _, test := range implTests { @@ -503,19 +505,78 @@ func F() {} // declaration env.OpenFile("a/a.go") refLoc := env.RegexpSearch("a/a.go", "F") got := fileLocations(env, env.References(refLoc)) - want := []string{"a/a.go", "b/b.go", "lib/lib.go"} + want := []string{"a/a.go:5", "b/b.go:5", "lib/lib.go:3"} if diff := cmp.Diff(want, got); diff != "" { t.Errorf("incorrect References (-want +got):\n%s", diff) } }) } -// fileLocations returns a new sorted array of the relative -// file name of each location. Duplicates are not removed. +// Test an 'implementation' query on a type that implements 'error'. +// (Unfortunately builtin locations cannot be expressed using @loc +// in the marker test framework.) +func TestImplementationsOfError(t *testing.T) { + const src = ` +-- go.mod -- +module example.com +go 1.12 + +-- a.go -- +package a + +type Error2 interface { + Error() string +} + +type MyError int +func (MyError) Error() string { return "" } + +type MyErrorPtr int +func (*MyErrorPtr) Error() string { return "" } +` + Run(t, src, func(t *testing.T, env *Env) { + env.OpenFile("a.go") + + for _, test := range []struct { + re string + want []string + }{ + // error type + {"Error2", []string{"a.go:10", "a.go:7", "std:builtin/builtin.go"}}, + {"MyError", []string{"a.go:3", "std:builtin/builtin.go"}}, + {"MyErrorPtr", []string{"a.go:3", "std:builtin/builtin.go"}}, + // error.Error method + {"(Error).. string", []string{"a.go:11", "a.go:8", "std:builtin/builtin.go"}}, + {"MyError. (Error)", []string{"a.go:4", "std:builtin/builtin.go"}}, + {"MyErrorPtr. 
(Error)", []string{"a.go:4", "std:builtin/builtin.go"}}, + } { + matchLoc := env.RegexpSearch("a.go", test.re) + impls := env.Implementations(matchLoc) + got := fileLocations(env, impls) + if !reflect.DeepEqual(got, test.want) { + t.Errorf("Implementations(%q) = %q, want %q", + test.re, got, test.want) + } + } + }) +} + +// fileLocations returns a new sorted array of the +// relative file name and line number of each location. +// Duplicates are not removed. +// Standard library filenames are abstracted for robustness. func fileLocations(env *regtest.Env, locs []protocol.Location) []string { got := make([]string, 0, len(locs)) for _, loc := range locs { - got = append(got, env.Sandbox.Workdir.URIToPath(loc.URI)) + path := env.Sandbox.Workdir.URIToPath(loc.URI) // (slashified) + if i := strings.Index(path, "/src/"); i >= 0 && filepath.IsAbs(path) { + // Absolute path with "src" segment: assume it's in GOROOT. + // Strip directory and don't add line/column since they are fragile. + path = "std:" + path[i+len("/src/"):] + } else { + path = fmt.Sprintf("%s:%d", path, loc.Range.Start.Line+1) + } + got = append(got, path) } sort.Strings(got) return got From 4318d630dcefcffb145a9f9602996f22bb9df1d1 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Wed, 10 May 2023 16:23:22 -0400 Subject: [PATCH 013/109] gopls: change the default value of "symbolScope" to "all" Following discussion on golang/go#37236, let's be a bit careful before changing behavior. For gopls@v0.12.0, we can keep the default of this setting at "all", and solicit feedback for which default our users prefer. Updates golang/go#37236 Change-Id: Ia92382d808983a6ce566c85d06b82afd2375fb90 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494217 TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Robert Findley Reviewed-by: Alan Donovan --- gopls/doc/settings.md | 2 +- gopls/internal/lsp/source/api_json.go | 2 +- gopls/internal/lsp/source/options.go | 2 +- .../regtest/marker/testdata/workspacesymbol/wsscope.txt | 6 +++--- gopls/internal/regtest/misc/workspace_symbol_test.go | 1 + 5 files changed, 7 insertions(+), 6 deletions(-) diff --git a/gopls/doc/settings.md b/gopls/doc/settings.md index fa810f0eb27..d3ffcc06dc0 100644 --- a/gopls/doc/settings.md +++ b/gopls/doc/settings.md @@ -470,7 +470,7 @@ Must be one of: dependencies. * `"workspace"` matches symbols in workspace packages only. -Default: `"workspace"`. +Default: `"all"`. 
#### **verboseOutput** *bool* diff --git a/gopls/internal/lsp/source/api_json.go b/gopls/internal/lsp/source/api_json.go index cfe4c6e61e1..d6fddc995e7 100644 --- a/gopls/internal/lsp/source/api_json.go +++ b/gopls/internal/lsp/source/api_json.go @@ -208,7 +208,7 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "`\"workspace\"` matches symbols in workspace packages only.\n", }, }, - Default: "\"workspace\"", + Default: "\"all\"", Hierarchy: "ui.navigation", }, { diff --git a/gopls/internal/lsp/source/options.go b/gopls/internal/lsp/source/options.go index 4667b64f406..2ca889513da 100644 --- a/gopls/internal/lsp/source/options.go +++ b/gopls/internal/lsp/source/options.go @@ -143,7 +143,7 @@ func DefaultOptions() *Options { ImportShortcut: BothShortcuts, SymbolMatcher: SymbolFastFuzzy, SymbolStyle: DynamicSymbols, - SymbolScope: WorkspaceSymbolScope, + SymbolScope: AllSymbolScope, }, CompletionOptions: CompletionOptions{ Matcher: Fuzzy, diff --git a/gopls/internal/regtest/marker/testdata/workspacesymbol/wsscope.txt b/gopls/internal/regtest/marker/testdata/workspacesymbol/wsscope.txt index 023230d62bb..e49483ad450 100644 --- a/gopls/internal/regtest/marker/testdata/workspacesymbol/wsscope.txt +++ b/gopls/internal/regtest/marker/testdata/workspacesymbol/wsscope.txt @@ -1,10 +1,10 @@ -This test verifies behavior when "symbolScope" is set to the default value -("workspace"). +This test verifies behavior when "symbolScope" is set to "workspace". -- settings.json -- { "symbolStyle": "full", - "symbolMatcher": "casesensitive" + "symbolMatcher": "casesensitive", + "symbolScope": "workspace" } -- go.mod -- diff --git a/gopls/internal/regtest/misc/workspace_symbol_test.go b/gopls/internal/regtest/misc/workspace_symbol_test.go index a99323dec20..849743b5b10 100644 --- a/gopls/internal/regtest/misc/workspace_symbol_test.go +++ b/gopls/internal/regtest/misc/workspace_symbol_test.go @@ -73,6 +73,7 @@ const ( "Fooex", // shorter than Fooest, FooBar, lexically before Fooey "Fooey", // shorter than Fooest, Foobar "Fooest", + "unsafe.Offsetof", // a very fuzzy match ) }) } From 3034d9c3a7917a0d2048e44e7511241092009fde Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 10 May 2023 15:04:38 -0400 Subject: [PATCH 014/109] gopls/internal/lsp/cmd: append, don't overwrite, in PublishDiagnostics This change causes the CLI tool's PublishDiagnostics operation to accumulate, rather than overwrite, the slice of file diagnostics, under the hypothesis that it is receiving multiple events and the later ones are clobbering the earlier ones, causing golang/go#59475. We perform a crude de-duplication in case this should result in duplicate diagnostics. A more robust approach using textDocument/diagnostic will be added in a follow-up. Also, clarify the mutex's responsibility, copy (don't alias) the diagnostics slice in the critical section, and tidy up the surrounding code. 
Updates golang/go#59475 Change-Id: Ifbb4974ef00ab7bd6547de28f052cec86462230b Reviewed-on: https://go-review.googlesource.com/c/tools/+/494275 Run-TryBot: Alan Donovan Auto-Submit: Alan Donovan Reviewed-by: Robert Findley TryBot-Result: Gopher Robot --- gopls/internal/lsp/cmd/cmd.go | 26 +++++++++++++++++++++-- gopls/internal/lsp/cmd/suggested_fix.go | 28 ++++++++++++------------- 2 files changed, 38 insertions(+), 16 deletions(-) diff --git a/gopls/internal/lsp/cmd/cmd.go b/gopls/internal/lsp/cmd/cmd.go index 0cbbd60dacf..02e135ae4e5 100644 --- a/gopls/internal/lsp/cmd/cmd.go +++ b/gopls/internal/lsp/cmd/cmd.go @@ -406,7 +406,7 @@ type cmdClient struct { diagnosticsMu sync.Mutex diagnosticsDone chan struct{} - filesMu sync.Mutex + filesMu sync.Mutex // guards files map and each cmdFile.diagnostics files map[span.URI]*cmdFile } @@ -518,6 +518,11 @@ func (c *cmdClient) ApplyEdit(ctx context.Context, p *protocol.ApplyWorkspaceEdi } func (c *cmdClient) PublishDiagnostics(ctx context.Context, p *protocol.PublishDiagnosticsParams) error { + var debug = os.Getenv(DebugSuggestedFixEnvVar) == "true" + if debug { + log.Printf("PublishDiagnostics URI=%v Diagnostics=%v", p.URI, p.Diagnostics) + } + if p.URI == "gopls://diagnostics-done" { close(c.diagnosticsDone) } @@ -530,7 +535,24 @@ func (c *cmdClient) PublishDiagnostics(ctx context.Context, p *protocol.PublishD defer c.filesMu.Unlock() file := c.getFile(ctx, fileURI(p.URI)) - file.diagnostics = p.Diagnostics + file.diagnostics = append(file.diagnostics, p.Diagnostics...) + + // Perform a crude in-place deduplication. + // TODO(golang/go#60122): replace the ad-hoc gopls/diagnoseFiles + // non-standard request with support for textDocument/diagnostic, + // so that we don't need to do this de-duplication. + type key [5]interface{} + seen := make(map[key]bool) + out := file.diagnostics[:0] + for _, d := range file.diagnostics { + k := key{d.Range, d.Severity, d.Code, d.Source, d.Message} + if !seen[k] { + seen[k] = true + out = append(out, d) + } + } + file.diagnostics = out + return nil } diff --git a/gopls/internal/lsp/cmd/suggested_fix.go b/gopls/internal/lsp/cmd/suggested_fix.go index d7d0b094d85..1128688f970 100644 --- a/gopls/internal/lsp/cmd/suggested_fix.go +++ b/gopls/internal/lsp/cmd/suggested_fix.go @@ -67,13 +67,24 @@ func (s *suggestedFix) Run(ctx context.Context, args ...string) error { if err != nil { return err } + rng, err := file.mapper.SpanRange(from) + if err != nil { + return err + } + // Get diagnostics. if err := conn.diagnoseFiles(ctx, []span.URI{uri}); err != nil { return err } + diagnostics := []protocol.Diagnostic{} // LSP wants non-nil slice conn.Client.filesMu.Lock() - defer conn.Client.filesMu.Unlock() + diagnostics = append(diagnostics, file.diagnostics...) + conn.Client.filesMu.Unlock() + if debug { + log.Printf("file diagnostics: %#v", diagnostics) + } + // Request code actions codeActionKinds := []protocol.CodeActionKind{protocol.QuickFix} if len(args) > 1 { codeActionKinds = []protocol.CodeActionKind{} @@ -81,25 +92,13 @@ func (s *suggestedFix) Run(ctx context.Context, args ...string) error { codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k)) } } - - rng, err := file.mapper.SpanRange(from) - if err != nil { - return err - } - if file.diagnostics == nil { - // LSP requires a slice, not a nil. 
- file.diagnostics = []protocol.Diagnostic{} - } - if debug { - log.Printf("file diagnostics: %#v", file.diagnostics) - } p := protocol.CodeActionParams{ TextDocument: protocol.TextDocumentIdentifier{ URI: protocol.URIFromSpanURI(uri), }, Context: protocol.CodeActionContext{ Only: codeActionKinds, - Diagnostics: file.diagnostics, + Diagnostics: diagnostics, }, Range: rng, } @@ -111,6 +110,7 @@ func (s *suggestedFix) Run(ctx context.Context, args ...string) error { log.Printf("code actions: %#v", actions) } + // Gather edits from matching code actions. var edits []protocol.TextEdit for _, a := range actions { if a.Command != nil { From 9aa9d134de8c729e1c021604252494aff9d72ee5 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 10 May 2023 15:36:30 -0400 Subject: [PATCH 015/109] cmd/bisect, internal/bisect: use more precise skip suffixes, written in hex If the target is misbehaving and some changes unlock others, then in general bisect is going to have a hard time and can fail to identify the problem. It will usually say "target fails inconsistently". One robustness improvement we can make is to use more bits than necessary for exclusions, which reduces the chances of accidentally excluding changes that simply didn't trigger this time around but might still be part of a bug later. To do this, we calculate the minimum number of bits needed to distinguish all the =y and =n changes observed, round up to a number of hex digits, and then add another digit for good measure. Change-Id: I02354f281370806c3eb4d85911a6ca92fcfcae05 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494276 Run-TryBot: Russ Cox TryBot-Result: Gopher Robot Reviewed-by: David Chase --- cmd/bisect/main.go | 52 +++++++++- cmd/bisect/testdata/basic.txt | 26 ++--- cmd/bisect/testdata/count2.txt | 52 +++++----- cmd/bisect/testdata/double.txt | 50 ++++----- cmd/bisect/testdata/max1.txt | 2 +- cmd/bisect/testdata/max2.txt | 56 +++++----- cmd/bisect/testdata/maxset.txt | 102 +++++++++---------- cmd/bisect/testdata/maxset4.txt | 174 ++++++++++++++++---------------- cmd/bisect/testdata/negate.txt | 50 ++++----- cmd/bisect/testdata/rand.txt | 52 +++++----- internal/bisect/bisect.go | 23 ++++- 11 files changed, 351 insertions(+), 288 deletions(-) diff --git a/cmd/bisect/main.go b/cmd/bisect/main.go index baae1fa17fb..6a3745c0582 100644 --- a/cmd/bisect/main.go +++ b/cmd/bisect/main.go @@ -136,9 +136,11 @@ import ( "fmt" "io" "log" + "math/bits" "math/rand" "os" "os/exec" + "sort" "strconv" "strings" "time" @@ -260,6 +262,18 @@ type Bisect struct { // each pattern starts with a !. Disable bool + // SkipDigits is the number of hex digits to use in skip messages. + // If the set of available changes is the same in each run, as it should be, + // then this doesn't matter: we'll only exclude suffixes that uniquely identify + // a given change. But for some programs, especially bisecting runtime + // behaviors, sometimes enabling one change unlocks questions about other + // changes. Strictly speaking this is a misuse of bisect, but just to make + // bisect more robust, we use the y and n runs to create an estimate of the + // number of bits needed for a unique suffix, and then we round it up to + // a number of hex digits, with one extra digit for good measure, and then + // we always use that many hex digits for skips. + SkipHexDigits int + // Add is a list of suffixes to add to every trial, because they // contain changes that are necessary for a group we are assembling. 
Add []string @@ -337,6 +351,10 @@ func (b *Bisect) Search() bool { b.Fatalf("target fails with no changes and all changes") } + // Compute minimum number of bits needed to distinguish + // all the changes we saw during N and all the changes we saw during Y. + b.SkipHexDigits = skipHexDigits(runN.MatchIDs, runY.MatchIDs) + // Loop finding and printing change sets, until none remain. found := 0 for { @@ -417,6 +435,35 @@ func (b *Bisect) Logf(format string, args ...any) { b.Stderr.Write([]byte(s)) } +func skipHexDigits(idY, idN []uint64) int { + var all []uint64 + seen := make(map[uint64]bool) + for _, x := range idY { + seen[x] = true + all = append(all, x) + } + for _, x := range idN { + if !seen[x] { + seen[x] = true + all = append(all, x) + } + } + sort.Slice(all, func(i, j int) bool { return bits.Reverse64(all[i]) < bits.Reverse64(all[j]) }) + digits := sort.Search(64/4, func(digits int) bool { + mask := uint64(1)<<(4*digits) - 1 + for i := 0; i+1 < len(all); i++ { + if all[i]&mask == all[i+1]&mask { + return false + } + } + return true + }) + if digits < 64/4 { + digits++ + } + return digits +} + // search searches for a single locally minimal change set. // // Invariant: r describes the result of r.Suffix + b.Add, which failed. @@ -436,10 +483,7 @@ func (b *Bisect) search(r *Result) []string { // If there's one matching change, that's the one we're looking for. if len(r.MatchIDs) == 1 { - if r.Suffix == "" { - return []string{"y"} - } - return []string{r.Suffix} + return []string{fmt.Sprintf("x%0*x", b.SkipHexDigits, r.MatchIDs[0]&(1<<(4*b.SkipHexDigits)-1))} } // If the suffix we were tracking in the trial is already 64 bits, diff --git a/cmd/bisect/testdata/basic.txt b/cmd/bisect/testdata/basic.txt index 57543bd5af4..10c98df6c4c 100644 --- a/cmd/bisect/testdata/basic.txt +++ b/cmd/bisect/testdata/basic.txt @@ -22,23 +22,23 @@ bisect: run: test +00010... FAIL (3 matches) bisect: run: test +000010... FAIL (2 matches) bisect: run: test +0000010... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000010... FAIL (1 matches) +bisect: run: test v+x002... FAIL (1 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000010... FAIL (89 matches) +bisect: run: test -x002... FAIL (89 matches) bisect: target still fails; searching for more bad changes -bisect: run: test +0-0000010... FAIL (44 matches) -bisect: run: test +00-0000010... ok (23 matches) -bisect: run: test +10-0000010... FAIL (21 matches) -bisect: run: test +010-0000010... ok (10 matches) -bisect: run: test +110-0000010... FAIL (11 matches) -bisect: run: test +0110-0000010... FAIL (6 matches) -bisect: run: test +00110-0000010... FAIL (3 matches) -bisect: run: test +000110-0000010... FAIL (2 matches) -bisect: run: test +0000110-0000010... FAIL (1 matches) +bisect: run: test +0-x002... FAIL (44 matches) +bisect: run: test +00-x002... ok (23 matches) +bisect: run: test +10-x002... FAIL (21 matches) +bisect: run: test +010-x002... ok (10 matches) +bisect: run: test +110-x002... FAIL (11 matches) +bisect: run: test +0110-x002... FAIL (6 matches) +bisect: run: test +00110-x002... FAIL (3 matches) +bisect: run: test +000110-x002... FAIL (2 matches) +bisect: run: test +0000110-x002... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000110-0000010... FAIL (1 matches) +bisect: run: test v+x006-x002... FAIL (1 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000110-0000010... 
ok (88 matches) +bisect: run: test -x006-x002... ok (88 matches) bisect: target succeeds with all remaining changes enabled diff --git a/cmd/bisect/testdata/count2.txt b/cmd/bisect/testdata/count2.txt index cee0cd07398..9e7e9f44de2 100644 --- a/cmd/bisect/testdata/count2.txt +++ b/cmd/bisect/testdata/count2.txt @@ -32,36 +32,36 @@ bisect: run: test +000010... FAIL (2 matches) bisect: run: test +0000010... FAIL (1 matches) bisect: run: test +0000010... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000010... FAIL (1 matches) -bisect: run: test v+0000010... FAIL (1 matches) +bisect: run: test v+x002... FAIL (1 matches) +bisect: run: test v+x002... FAIL (1 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000010... FAIL (89 matches) -bisect: run: test -0000010... FAIL (89 matches) +bisect: run: test -x002... FAIL (89 matches) +bisect: run: test -x002... FAIL (89 matches) bisect: target still fails; searching for more bad changes -bisect: run: test +0-0000010... FAIL (44 matches) -bisect: run: test +0-0000010... FAIL (44 matches) -bisect: run: test +00-0000010... ok (23 matches) -bisect: run: test +00-0000010... ok (23 matches) -bisect: run: test +10-0000010... FAIL (21 matches) -bisect: run: test +10-0000010... FAIL (21 matches) -bisect: run: test +010-0000010... ok (10 matches) -bisect: run: test +010-0000010... ok (10 matches) -bisect: run: test +110-0000010... FAIL (11 matches) -bisect: run: test +110-0000010... FAIL (11 matches) -bisect: run: test +0110-0000010... FAIL (6 matches) -bisect: run: test +0110-0000010... FAIL (6 matches) -bisect: run: test +00110-0000010... FAIL (3 matches) -bisect: run: test +00110-0000010... FAIL (3 matches) -bisect: run: test +000110-0000010... FAIL (2 matches) -bisect: run: test +000110-0000010... FAIL (2 matches) -bisect: run: test +0000110-0000010... FAIL (1 matches) -bisect: run: test +0000110-0000010... FAIL (1 matches) +bisect: run: test +0-x002... FAIL (44 matches) +bisect: run: test +0-x002... FAIL (44 matches) +bisect: run: test +00-x002... ok (23 matches) +bisect: run: test +00-x002... ok (23 matches) +bisect: run: test +10-x002... FAIL (21 matches) +bisect: run: test +10-x002... FAIL (21 matches) +bisect: run: test +010-x002... ok (10 matches) +bisect: run: test +010-x002... ok (10 matches) +bisect: run: test +110-x002... FAIL (11 matches) +bisect: run: test +110-x002... FAIL (11 matches) +bisect: run: test +0110-x002... FAIL (6 matches) +bisect: run: test +0110-x002... FAIL (6 matches) +bisect: run: test +00110-x002... FAIL (3 matches) +bisect: run: test +00110-x002... FAIL (3 matches) +bisect: run: test +000110-x002... FAIL (2 matches) +bisect: run: test +000110-x002... FAIL (2 matches) +bisect: run: test +0000110-x002... FAIL (1 matches) +bisect: run: test +0000110-x002... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000110-0000010... FAIL (1 matches) -bisect: run: test v+0000110-0000010... FAIL (1 matches) +bisect: run: test v+x006-x002... FAIL (1 matches) +bisect: run: test v+x006-x002... FAIL (1 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000110-0000010... ok (88 matches) -bisect: run: test -0000110-0000010... ok (88 matches) +bisect: run: test -x006-x002... ok (88 matches) +bisect: run: test -x006-x002... 
ok (88 matches) bisect: target succeeds with all remaining changes enabled diff --git a/cmd/bisect/testdata/double.txt b/cmd/bisect/testdata/double.txt index 93712ad5afe..427ed092637 100644 --- a/cmd/bisect/testdata/double.txt +++ b/cmd/bisect/testdata/double.txt @@ -23,35 +23,35 @@ bisect: run: test +00010... FAIL (3 matches) bisect: run: test +000010... FAIL (2 matches) bisect: run: test +0000010... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000010... FAIL (1 matches) +bisect: run: test v+x002... FAIL (1 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000010... FAIL (89 matches) +bisect: run: test -x002... FAIL (89 matches) bisect: target still fails; searching for more bad changes -bisect: run: test +0-0000010... ok (44 matches) -bisect: run: test +1-0000010... ok (45 matches) -bisect: run: test +0+1-0000010... FAIL (44 matches) -bisect: run: test +00+1-0000010... ok (23 matches) -bisect: run: test +10+1-0000010... FAIL (21 matches) -bisect: run: test +010+1-0000010... ok (10 matches) -bisect: run: test +110+1-0000010... FAIL (11 matches) -bisect: run: test +0110+1-0000010... FAIL (6 matches) -bisect: run: test +00110+1-0000010... FAIL (3 matches) -bisect: run: test +000110+1-0000010... FAIL (2 matches) -bisect: run: test +0000110+1-0000010... FAIL (1 matches) -bisect: run: test +1+0000110-0000010... FAIL (45 matches) -bisect: run: test +01+0000110-0000010... ok (23 matches) -bisect: run: test +11+0000110-0000010... FAIL (22 matches) -bisect: run: test +011+0000110-0000010... FAIL (11 matches) -bisect: run: test +0011+0000110-0000010... ok (6 matches) -bisect: run: test +1011+0000110-0000010... FAIL (5 matches) -bisect: run: test +01011+0000110-0000010... ok (3 matches) -bisect: run: test +11011+0000110-0000010... FAIL (2 matches) -bisect: run: test +011011+0000110-0000010... ok (1 matches) -bisect: run: test +111011+0000110-0000010... FAIL (1 matches) +bisect: run: test +0-x002... ok (44 matches) +bisect: run: test +1-x002... ok (45 matches) +bisect: run: test +0+1-x002... FAIL (44 matches) +bisect: run: test +00+1-x002... ok (23 matches) +bisect: run: test +10+1-x002... FAIL (21 matches) +bisect: run: test +010+1-x002... ok (10 matches) +bisect: run: test +110+1-x002... FAIL (11 matches) +bisect: run: test +0110+1-x002... FAIL (6 matches) +bisect: run: test +00110+1-x002... FAIL (3 matches) +bisect: run: test +000110+1-x002... FAIL (2 matches) +bisect: run: test +0000110+1-x002... FAIL (1 matches) +bisect: run: test +1+x006-x002... FAIL (45 matches) +bisect: run: test +01+x006-x002... ok (23 matches) +bisect: run: test +11+x006-x002... FAIL (22 matches) +bisect: run: test +011+x006-x002... FAIL (11 matches) +bisect: run: test +0011+x006-x002... ok (6 matches) +bisect: run: test +1011+x006-x002... FAIL (5 matches) +bisect: run: test +01011+x006-x002... ok (3 matches) +bisect: run: test +11011+x006-x002... FAIL (2 matches) +bisect: run: test +011011+x006-x002... ok (1 matches) +bisect: run: test +111011+x006-x002... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000110+111011-0000010... FAIL (2 matches) +bisect: run: test v+x006+x03b-x002... FAIL (2 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000110-111011-0000010... ok (87 matches) +bisect: run: test -x006-x03b-x002... 
ok (87 matches) bisect: target succeeds with all remaining changes enabled diff --git a/cmd/bisect/testdata/max1.txt b/cmd/bisect/testdata/max1.txt index e1848d5e5b1..4014276d603 100644 --- a/cmd/bisect/testdata/max1.txt +++ b/cmd/bisect/testdata/max1.txt @@ -19,5 +19,5 @@ bisect: run: test +00010... FAIL (3 matches) bisect: run: test +000010... FAIL (2 matches) bisect: run: test +0000010... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000010... FAIL (1 matches) +bisect: run: test v+x002... FAIL (1 matches) bisect: FOUND failing change set diff --git a/cmd/bisect/testdata/max2.txt b/cmd/bisect/testdata/max2.txt index 9f651692aa3..981b902c951 100644 --- a/cmd/bisect/testdata/max2.txt +++ b/cmd/bisect/testdata/max2.txt @@ -24,36 +24,36 @@ bisect: run: test +00010... FAIL (3 matches) bisect: run: test +000010... FAIL (2 matches) bisect: run: test +0000010... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000010... FAIL (1 matches) +bisect: run: test v+x002... FAIL (1 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000010... FAIL (89 matches) +bisect: run: test -x002... FAIL (89 matches) bisect: target still fails; searching for more bad changes -bisect: run: test +0-0000010... ok (44 matches) -bisect: run: test +1-0000010... FAIL (45 matches) -bisect: run: test +01-0000010... ok (23 matches) -bisect: run: test +11-0000010... ok (22 matches) -bisect: run: test +01+11-0000010... FAIL (23 matches) -bisect: run: test +001+11-0000010... ok (12 matches) -bisect: run: test +101+11-0000010... FAIL (11 matches) -bisect: run: test +0101+11-0000010... ok (6 matches) -bisect: run: test +1101+11-0000010... ok (5 matches) -bisect: run: test +0101+11+1101-0000010... FAIL (6 matches) -bisect: run: test +00101+11+1101-0000010... FAIL (3 matches) -bisect: run: test +000101+11+1101-0000010... FAIL (2 matches) -bisect: run: test +0000101+11+1101-0000010... ok (1 matches) -bisect: run: test +1000101+11+1101-0000010... FAIL (1 matches) -bisect: run: test +1101+11+1000101-0000010... FAIL (5 matches) -bisect: run: test +01101+11+1000101-0000010... FAIL (3 matches) -bisect: run: test +001101+11+1000101-0000010... FAIL (2 matches) -bisect: run: test +0001101+11+1000101-0000010... FAIL (1 matches) -bisect: run: test +11+1000101+0001101-0000010... FAIL (22 matches) -bisect: run: test +011+1000101+0001101-0000010... ok (11 matches) -bisect: run: test +111+1000101+0001101-0000010... FAIL (11 matches) -bisect: run: test +0111+1000101+0001101-0000010... FAIL (6 matches) -bisect: run: test +00111+1000101+0001101-0000010... FAIL (3 matches) -bisect: run: test +000111+1000101+0001101-0000010... ok (2 matches) -bisect: run: test +100111+1000101+0001101-0000010... FAIL (1 matches) +bisect: run: test +0-x002... ok (44 matches) +bisect: run: test +1-x002... FAIL (45 matches) +bisect: run: test +01-x002... ok (23 matches) +bisect: run: test +11-x002... ok (22 matches) +bisect: run: test +01+11-x002... FAIL (23 matches) +bisect: run: test +001+11-x002... ok (12 matches) +bisect: run: test +101+11-x002... FAIL (11 matches) +bisect: run: test +0101+11-x002... ok (6 matches) +bisect: run: test +1101+11-x002... ok (5 matches) +bisect: run: test +0101+11+1101-x002... FAIL (6 matches) +bisect: run: test +00101+11+1101-x002... FAIL (3 matches) +bisect: run: test +000101+11+1101-x002... FAIL (2 matches) +bisect: run: test +0000101+11+1101-x002... ok (1 matches) +bisect: run: test +1000101+11+1101-x002... 
FAIL (1 matches) +bisect: run: test +1101+11+x045-x002... FAIL (5 matches) +bisect: run: test +01101+11+x045-x002... FAIL (3 matches) +bisect: run: test +001101+11+x045-x002... FAIL (2 matches) +bisect: run: test +0001101+11+x045-x002... FAIL (1 matches) +bisect: run: test +11+x045+x00d-x002... FAIL (22 matches) +bisect: run: test +011+x045+x00d-x002... ok (11 matches) +bisect: run: test +111+x045+x00d-x002... FAIL (11 matches) +bisect: run: test +0111+x045+x00d-x002... FAIL (6 matches) +bisect: run: test +00111+x045+x00d-x002... FAIL (3 matches) +bisect: run: test +000111+x045+x00d-x002... ok (2 matches) +bisect: run: test +100111+x045+x00d-x002... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+1000101+0001101+100111-0000010... FAIL (3 matches) +bisect: run: test v+x045+x00d+x027-x002... FAIL (3 matches) bisect: FOUND failing change set diff --git a/cmd/bisect/testdata/maxset.txt b/cmd/bisect/testdata/maxset.txt index 259fa3e57a7..cf8af34fa1e 100644 --- a/cmd/bisect/testdata/maxset.txt +++ b/cmd/bisect/testdata/maxset.txt @@ -24,61 +24,61 @@ bisect: run: test +00010... FAIL (3 matches) bisect: run: test +000010... FAIL (2 matches) bisect: run: test +0000010... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000010... FAIL (1 matches) +bisect: run: test v+x002... FAIL (1 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000010... FAIL (89 matches) +bisect: run: test -x002... FAIL (89 matches) bisect: target still fails; searching for more bad changes -bisect: run: test +0-0000010... ok (44 matches) -bisect: run: test +1-0000010... FAIL (45 matches) -bisect: run: test +01-0000010... ok (23 matches) -bisect: run: test +11-0000010... ok (22 matches) -bisect: run: test +01+11-0000010... FAIL (23 matches) -bisect: run: test +001+11-0000010... ok (12 matches) -bisect: run: test +101+11-0000010... FAIL (11 matches) -bisect: run: test +0101+11-0000010... ok (6 matches) -bisect: run: test +1101+11-0000010... ok (5 matches) -bisect: run: test +0101+11+1101-0000010... FAIL (6 matches) -bisect: run: test +00101+11+1101-0000010... FAIL (3 matches) -bisect: run: test +000101+11+1101-0000010... FAIL (2 matches) -bisect: run: test +0000101+11+1101-0000010... ok (1 matches) -bisect: run: test +1000101+11+1101-0000010... FAIL (1 matches) -bisect: run: test +1101+11+1000101-0000010... FAIL (5 matches) -bisect: run: test +01101+11+1000101-0000010... FAIL (3 matches) -bisect: run: test +001101+11+1000101-0000010... FAIL (2 matches) -bisect: run: test +0001101+11+1000101-0000010... FAIL (1 matches) -bisect: run: test +11+1000101+0001101-0000010... FAIL (22 matches) -bisect: run: test +011+1000101+0001101-0000010... ok (11 matches) -bisect: run: test +111+1000101+0001101-0000010... FAIL (11 matches) -bisect: run: test +0111+1000101+0001101-0000010... FAIL (6 matches) -bisect: run: test +00111+1000101+0001101-0000010... FAIL (3 matches) -bisect: run: test +000111+1000101+0001101-0000010... ok (2 matches) -bisect: run: test +100111+1000101+0001101-0000010... FAIL (1 matches) +bisect: run: test +0-x002... ok (44 matches) +bisect: run: test +1-x002... FAIL (45 matches) +bisect: run: test +01-x002... ok (23 matches) +bisect: run: test +11-x002... ok (22 matches) +bisect: run: test +01+11-x002... FAIL (23 matches) +bisect: run: test +001+11-x002... ok (12 matches) +bisect: run: test +101+11-x002... FAIL (11 matches) +bisect: run: test +0101+11-x002... ok (6 matches) +bisect: run: test +1101+11-x002... 
ok (5 matches) +bisect: run: test +0101+11+1101-x002... FAIL (6 matches) +bisect: run: test +00101+11+1101-x002... FAIL (3 matches) +bisect: run: test +000101+11+1101-x002... FAIL (2 matches) +bisect: run: test +0000101+11+1101-x002... ok (1 matches) +bisect: run: test +1000101+11+1101-x002... FAIL (1 matches) +bisect: run: test +1101+11+x045-x002... FAIL (5 matches) +bisect: run: test +01101+11+x045-x002... FAIL (3 matches) +bisect: run: test +001101+11+x045-x002... FAIL (2 matches) +bisect: run: test +0001101+11+x045-x002... FAIL (1 matches) +bisect: run: test +11+x045+x00d-x002... FAIL (22 matches) +bisect: run: test +011+x045+x00d-x002... ok (11 matches) +bisect: run: test +111+x045+x00d-x002... FAIL (11 matches) +bisect: run: test +0111+x045+x00d-x002... FAIL (6 matches) +bisect: run: test +00111+x045+x00d-x002... FAIL (3 matches) +bisect: run: test +000111+x045+x00d-x002... ok (2 matches) +bisect: run: test +100111+x045+x00d-x002... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+1000101+0001101+100111-0000010... FAIL (3 matches) +bisect: run: test v+x045+x00d+x027-x002... FAIL (3 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -1000101-0001101-100111-0000010... FAIL (86 matches) +bisect: run: test -x045-x00d-x027-x002... FAIL (86 matches) bisect: target still fails; searching for more bad changes -bisect: run: test +0-1000101-0001101-100111-0000010... ok (44 matches) -bisect: run: test +1-1000101-0001101-100111-0000010... ok (42 matches) -bisect: run: test +0+1-1000101-0001101-100111-0000010... FAIL (44 matches) -bisect: run: test +00+1-1000101-0001101-100111-0000010... FAIL (23 matches) -bisect: run: test +000+1-1000101-0001101-100111-0000010... ok (12 matches) -bisect: run: test +100+1-1000101-0001101-100111-0000010... ok (11 matches) -bisect: run: test +000+1+100-1000101-0001101-100111-0000010... FAIL (12 matches) -bisect: run: test +0000+1+100-1000101-0001101-100111-0000010... FAIL (6 matches) -bisect: run: test +00000+1+100-1000101-0001101-100111-0000010... FAIL (3 matches) -bisect: run: test +000000+1+100-1000101-0001101-100111-0000010... ok (2 matches) -bisect: run: test +100000+1+100-1000101-0001101-100111-0000010... FAIL (1 matches) -bisect: run: test +100+1+100000-1000101-0001101-100111-0000010... FAIL (11 matches) -bisect: run: test +0100+1+100000-1000101-0001101-100111-0000010... ok (6 matches) -bisect: run: test +1100+1+100000-1000101-0001101-100111-0000010... FAIL (5 matches) -bisect: run: test +01100+1+100000-1000101-0001101-100111-0000010... FAIL (3 matches) -bisect: run: test +001100+1+100000-1000101-0001101-100111-0000010... FAIL (2 matches) -bisect: run: test +0001100+1+100000-1000101-0001101-100111-0000010... FAIL (1 matches) -bisect: run: test +1+100000+0001100-1000101-0001101-100111-0000010... FAIL (42 matches) -bisect: run: test +01+100000+0001100-1000101-0001101-100111-0000010... FAIL (21 matches) -bisect: run: test +001+100000+0001100-1000101-0001101-100111-0000010... FAIL (12 matches) -bisect: run: test +0001+100000+0001100-1000101-0001101-100111-0000010... ok (6 matches) -bisect: run: test +1001+100000+0001100-1000101-0001101-100111-0000010... ok (6 matches) +bisect: run: test +0-x045-x00d-x027-x002... ok (44 matches) +bisect: run: test +1-x045-x00d-x027-x002... ok (42 matches) +bisect: run: test +0+1-x045-x00d-x027-x002... FAIL (44 matches) +bisect: run: test +00+1-x045-x00d-x027-x002... FAIL (23 matches) +bisect: run: test +000+1-x045-x00d-x027-x002... 
ok (12 matches) +bisect: run: test +100+1-x045-x00d-x027-x002... ok (11 matches) +bisect: run: test +000+1+100-x045-x00d-x027-x002... FAIL (12 matches) +bisect: run: test +0000+1+100-x045-x00d-x027-x002... FAIL (6 matches) +bisect: run: test +00000+1+100-x045-x00d-x027-x002... FAIL (3 matches) +bisect: run: test +000000+1+100-x045-x00d-x027-x002... ok (2 matches) +bisect: run: test +100000+1+100-x045-x00d-x027-x002... FAIL (1 matches) +bisect: run: test +100+1+x020-x045-x00d-x027-x002... FAIL (11 matches) +bisect: run: test +0100+1+x020-x045-x00d-x027-x002... ok (6 matches) +bisect: run: test +1100+1+x020-x045-x00d-x027-x002... FAIL (5 matches) +bisect: run: test +01100+1+x020-x045-x00d-x027-x002... FAIL (3 matches) +bisect: run: test +001100+1+x020-x045-x00d-x027-x002... FAIL (2 matches) +bisect: run: test +0001100+1+x020-x045-x00d-x027-x002... FAIL (1 matches) +bisect: run: test +1+x020+x00c-x045-x00d-x027-x002... FAIL (42 matches) +bisect: run: test +01+x020+x00c-x045-x00d-x027-x002... FAIL (21 matches) +bisect: run: test +001+x020+x00c-x045-x00d-x027-x002... FAIL (12 matches) +bisect: run: test +0001+x020+x00c-x045-x00d-x027-x002... ok (6 matches) +bisect: run: test +1001+x020+x00c-x045-x00d-x027-x002... ok (6 matches) diff --git a/cmd/bisect/testdata/maxset4.txt b/cmd/bisect/testdata/maxset4.txt index ac02a677ae2..8211c4ccd9b 100644 --- a/cmd/bisect/testdata/maxset4.txt +++ b/cmd/bisect/testdata/maxset4.txt @@ -34,105 +34,105 @@ bisect: run: test +00010... FAIL (3 matches) bisect: run: test +000010... FAIL (2 matches) bisect: run: test +0000010... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000010... FAIL (1 matches) +bisect: run: test v+x002... FAIL (1 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000010... FAIL (89 matches) +bisect: run: test -x002... FAIL (89 matches) bisect: target still fails; searching for more bad changes -bisect: run: test +0-0000010... ok (44 matches) -bisect: run: test +1-0000010... FAIL (45 matches) -bisect: run: test +01-0000010... ok (23 matches) -bisect: run: test +11-0000010... ok (22 matches) -bisect: run: test +01+11-0000010... FAIL (23 matches) -bisect: run: test +001+11-0000010... ok (12 matches) -bisect: run: test +101+11-0000010... FAIL (11 matches) -bisect: run: test +0101+11-0000010... ok (6 matches) -bisect: run: test +1101+11-0000010... ok (5 matches) -bisect: run: test +0101+11+1101-0000010... FAIL (6 matches) -bisect: run: test +00101+11+1101-0000010... FAIL (3 matches) -bisect: run: test +000101+11+1101-0000010... FAIL (2 matches) -bisect: run: test +0000101+11+1101-0000010... ok (1 matches) -bisect: run: test +1000101+11+1101-0000010... FAIL (1 matches) -bisect: run: test +1101+11+1000101-0000010... FAIL (5 matches) -bisect: run: test +01101+11+1000101-0000010... FAIL (3 matches) -bisect: run: test +001101+11+1000101-0000010... FAIL (2 matches) -bisect: run: test +0001101+11+1000101-0000010... FAIL (1 matches) -bisect: run: test +11+1000101+0001101-0000010... FAIL (22 matches) -bisect: run: test +011+1000101+0001101-0000010... ok (11 matches) -bisect: run: test +111+1000101+0001101-0000010... FAIL (11 matches) -bisect: run: test +0111+1000101+0001101-0000010... FAIL (6 matches) -bisect: run: test +00111+1000101+0001101-0000010... FAIL (3 matches) -bisect: run: test +000111+1000101+0001101-0000010... ok (2 matches) -bisect: run: test +100111+1000101+0001101-0000010... FAIL (1 matches) +bisect: run: test +0-x002... 
ok (44 matches) +bisect: run: test +1-x002... FAIL (45 matches) +bisect: run: test +01-x002... ok (23 matches) +bisect: run: test +11-x002... ok (22 matches) +bisect: run: test +01+11-x002... FAIL (23 matches) +bisect: run: test +001+11-x002... ok (12 matches) +bisect: run: test +101+11-x002... FAIL (11 matches) +bisect: run: test +0101+11-x002... ok (6 matches) +bisect: run: test +1101+11-x002... ok (5 matches) +bisect: run: test +0101+11+1101-x002... FAIL (6 matches) +bisect: run: test +00101+11+1101-x002... FAIL (3 matches) +bisect: run: test +000101+11+1101-x002... FAIL (2 matches) +bisect: run: test +0000101+11+1101-x002... ok (1 matches) +bisect: run: test +1000101+11+1101-x002... FAIL (1 matches) +bisect: run: test +1101+11+x045-x002... FAIL (5 matches) +bisect: run: test +01101+11+x045-x002... FAIL (3 matches) +bisect: run: test +001101+11+x045-x002... FAIL (2 matches) +bisect: run: test +0001101+11+x045-x002... FAIL (1 matches) +bisect: run: test +11+x045+x00d-x002... FAIL (22 matches) +bisect: run: test +011+x045+x00d-x002... ok (11 matches) +bisect: run: test +111+x045+x00d-x002... FAIL (11 matches) +bisect: run: test +0111+x045+x00d-x002... FAIL (6 matches) +bisect: run: test +00111+x045+x00d-x002... FAIL (3 matches) +bisect: run: test +000111+x045+x00d-x002... ok (2 matches) +bisect: run: test +100111+x045+x00d-x002... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+1000101+0001101+100111-0000010... FAIL (3 matches) +bisect: run: test v+x045+x00d+x027-x002... FAIL (3 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -1000101-0001101-100111-0000010... FAIL (86 matches) +bisect: run: test -x045-x00d-x027-x002... FAIL (86 matches) bisect: target still fails; searching for more bad changes -bisect: run: test +0-1000101-0001101-100111-0000010... ok (44 matches) -bisect: run: test +1-1000101-0001101-100111-0000010... ok (42 matches) -bisect: run: test +0+1-1000101-0001101-100111-0000010... FAIL (44 matches) -bisect: run: test +00+1-1000101-0001101-100111-0000010... FAIL (23 matches) -bisect: run: test +000+1-1000101-0001101-100111-0000010... ok (12 matches) -bisect: run: test +100+1-1000101-0001101-100111-0000010... ok (11 matches) -bisect: run: test +000+1+100-1000101-0001101-100111-0000010... FAIL (12 matches) -bisect: run: test +0000+1+100-1000101-0001101-100111-0000010... FAIL (6 matches) -bisect: run: test +00000+1+100-1000101-0001101-100111-0000010... FAIL (3 matches) -bisect: run: test +000000+1+100-1000101-0001101-100111-0000010... ok (2 matches) -bisect: run: test +100000+1+100-1000101-0001101-100111-0000010... FAIL (1 matches) -bisect: run: test +100+1+100000-1000101-0001101-100111-0000010... FAIL (11 matches) -bisect: run: test +0100+1+100000-1000101-0001101-100111-0000010... ok (6 matches) -bisect: run: test +1100+1+100000-1000101-0001101-100111-0000010... FAIL (5 matches) -bisect: run: test +01100+1+100000-1000101-0001101-100111-0000010... FAIL (3 matches) -bisect: run: test +001100+1+100000-1000101-0001101-100111-0000010... FAIL (2 matches) -bisect: run: test +0001100+1+100000-1000101-0001101-100111-0000010... FAIL (1 matches) -bisect: run: test +1+100000+0001100-1000101-0001101-100111-0000010... FAIL (42 matches) -bisect: run: test +01+100000+0001100-1000101-0001101-100111-0000010... FAIL (21 matches) -bisect: run: test +001+100000+0001100-1000101-0001101-100111-0000010... FAIL (12 matches) -bisect: run: test +0001+100000+0001100-1000101-0001101-100111-0000010... 
ok (6 matches) -bisect: run: test +1001+100000+0001100-1000101-0001101-100111-0000010... ok (6 matches) -bisect: run: test +0001+100000+0001100+1001-1000101-0001101-100111-0000010... FAIL (6 matches) -bisect: run: test +00001+100000+0001100+1001-1000101-0001101-100111-0000010... ok (3 matches) -bisect: run: test +10001+100000+0001100+1001-1000101-0001101-100111-0000010... FAIL (3 matches) -bisect: run: test +010001+100000+0001100+1001-1000101-0001101-100111-0000010... ok (2 matches) -bisect: run: test +110001+100000+0001100+1001-1000101-0001101-100111-0000010... FAIL (1 matches) -bisect: run: test +1001+100000+0001100+110001-1000101-0001101-100111-0000010... FAIL (6 matches) -bisect: run: test +01001+100000+0001100+110001-1000101-0001101-100111-0000010... ok (3 matches) -bisect: run: test +11001+100000+0001100+110001-1000101-0001101-100111-0000010... FAIL (3 matches) -bisect: run: test +011001+100000+0001100+110001-1000101-0001101-100111-0000010... FAIL (2 matches) -bisect: run: test +0011001+100000+0001100+110001-1000101-0001101-100111-0000010... ok (1 matches) -bisect: run: test +1011001+100000+0001100+110001-1000101-0001101-100111-0000010... FAIL (1 matches) +bisect: run: test +0-x045-x00d-x027-x002... ok (44 matches) +bisect: run: test +1-x045-x00d-x027-x002... ok (42 matches) +bisect: run: test +0+1-x045-x00d-x027-x002... FAIL (44 matches) +bisect: run: test +00+1-x045-x00d-x027-x002... FAIL (23 matches) +bisect: run: test +000+1-x045-x00d-x027-x002... ok (12 matches) +bisect: run: test +100+1-x045-x00d-x027-x002... ok (11 matches) +bisect: run: test +000+1+100-x045-x00d-x027-x002... FAIL (12 matches) +bisect: run: test +0000+1+100-x045-x00d-x027-x002... FAIL (6 matches) +bisect: run: test +00000+1+100-x045-x00d-x027-x002... FAIL (3 matches) +bisect: run: test +000000+1+100-x045-x00d-x027-x002... ok (2 matches) +bisect: run: test +100000+1+100-x045-x00d-x027-x002... FAIL (1 matches) +bisect: run: test +100+1+x020-x045-x00d-x027-x002... FAIL (11 matches) +bisect: run: test +0100+1+x020-x045-x00d-x027-x002... ok (6 matches) +bisect: run: test +1100+1+x020-x045-x00d-x027-x002... FAIL (5 matches) +bisect: run: test +01100+1+x020-x045-x00d-x027-x002... FAIL (3 matches) +bisect: run: test +001100+1+x020-x045-x00d-x027-x002... FAIL (2 matches) +bisect: run: test +0001100+1+x020-x045-x00d-x027-x002... FAIL (1 matches) +bisect: run: test +1+x020+x00c-x045-x00d-x027-x002... FAIL (42 matches) +bisect: run: test +01+x020+x00c-x045-x00d-x027-x002... FAIL (21 matches) +bisect: run: test +001+x020+x00c-x045-x00d-x027-x002... FAIL (12 matches) +bisect: run: test +0001+x020+x00c-x045-x00d-x027-x002... ok (6 matches) +bisect: run: test +1001+x020+x00c-x045-x00d-x027-x002... ok (6 matches) +bisect: run: test +0001+x020+x00c+1001-x045-x00d-x027-x002... FAIL (6 matches) +bisect: run: test +00001+x020+x00c+1001-x045-x00d-x027-x002... ok (3 matches) +bisect: run: test +10001+x020+x00c+1001-x045-x00d-x027-x002... FAIL (3 matches) +bisect: run: test +010001+x020+x00c+1001-x045-x00d-x027-x002... ok (2 matches) +bisect: run: test +110001+x020+x00c+1001-x045-x00d-x027-x002... FAIL (1 matches) +bisect: run: test +1001+x020+x00c+x031-x045-x00d-x027-x002... FAIL (6 matches) +bisect: run: test +01001+x020+x00c+x031-x045-x00d-x027-x002... ok (3 matches) +bisect: run: test +11001+x020+x00c+x031-x045-x00d-x027-x002... FAIL (3 matches) +bisect: run: test +011001+x020+x00c+x031-x045-x00d-x027-x002... FAIL (2 matches) +bisect: run: test +0011001+x020+x00c+x031-x045-x00d-x027-x002... 
ok (1 matches) +bisect: run: test +1011001+x020+x00c+x031-x045-x00d-x027-x002... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+100000+0001100+110001+1011001-1000101-0001101-100111-0000010... FAIL (4 matches) +bisect: run: test v+x020+x00c+x031+x059-x045-x00d-x027-x002... FAIL (4 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (82 matches) +bisect: run: test -x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (82 matches) bisect: target still fails; searching for more bad changes -bisect: run: test +0-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... ok (42 matches) -bisect: run: test +1-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... ok (40 matches) -bisect: run: test +0+1-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (42 matches) -bisect: run: test +00+1-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... ok (21 matches) -bisect: run: test +10+1-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (21 matches) -bisect: run: test +010+1-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... ok (10 matches) -bisect: run: test +110+1-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (11 matches) -bisect: run: test +0110+1-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (6 matches) -bisect: run: test +00110+1-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (3 matches) -bisect: run: test +000110+1-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (2 matches) -bisect: run: test +0000110+1-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (1 matches) -bisect: run: test +1+0000110-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (40 matches) -bisect: run: test +01+0000110-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... ok (19 matches) -bisect: run: test +11+0000110-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (21 matches) -bisect: run: test +011+0000110-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (11 matches) -bisect: run: test +0011+0000110-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... ok (6 matches) -bisect: run: test +1011+0000110-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (5 matches) -bisect: run: test +01011+0000110-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... ok (3 matches) -bisect: run: test +11011+0000110-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (2 matches) -bisect: run: test +011011+0000110-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... ok (1 matches) -bisect: run: test +111011+0000110-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (1 matches) +bisect: run: test +0-x020-x00c-x031-x059-x045-x00d-x027-x002... ok (42 matches) +bisect: run: test +1-x020-x00c-x031-x059-x045-x00d-x027-x002... ok (40 matches) +bisect: run: test +0+1-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (42 matches) +bisect: run: test +00+1-x020-x00c-x031-x059-x045-x00d-x027-x002... ok (21 matches) +bisect: run: test +10+1-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (21 matches) +bisect: run: test +010+1-x020-x00c-x031-x059-x045-x00d-x027-x002... ok (10 matches) +bisect: run: test +110+1-x020-x00c-x031-x059-x045-x00d-x027-x002... 
FAIL (11 matches) +bisect: run: test +0110+1-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (6 matches) +bisect: run: test +00110+1-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (3 matches) +bisect: run: test +000110+1-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (2 matches) +bisect: run: test +0000110+1-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (1 matches) +bisect: run: test +1+x006-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (40 matches) +bisect: run: test +01+x006-x020-x00c-x031-x059-x045-x00d-x027-x002... ok (19 matches) +bisect: run: test +11+x006-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (21 matches) +bisect: run: test +011+x006-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (11 matches) +bisect: run: test +0011+x006-x020-x00c-x031-x059-x045-x00d-x027-x002... ok (6 matches) +bisect: run: test +1011+x006-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (5 matches) +bisect: run: test +01011+x006-x020-x00c-x031-x059-x045-x00d-x027-x002... ok (3 matches) +bisect: run: test +11011+x006-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (2 matches) +bisect: run: test +011011+x006-x020-x00c-x031-x059-x045-x00d-x027-x002... ok (1 matches) +bisect: run: test +111011+x006-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000110+111011-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... FAIL (2 matches) +bisect: run: test v+x006+x03b-x020-x00c-x031-x059-x045-x00d-x027-x002... FAIL (2 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000110-111011-100000-0001100-110001-1011001-1000101-0001101-100111-0000010... ok (80 matches) +bisect: run: test -x006-x03b-x020-x00c-x031-x059-x045-x00d-x027-x002... ok (80 matches) bisect: target succeeds with all remaining changes enabled diff --git a/cmd/bisect/testdata/negate.txt b/cmd/bisect/testdata/negate.txt index 53f617cc0c7..92ace596413 100644 --- a/cmd/bisect/testdata/negate.txt +++ b/cmd/bisect/testdata/negate.txt @@ -23,35 +23,35 @@ bisect: run: test !+00010... FAIL (3 matches) bisect: run: test !+000010... FAIL (2 matches) bisect: run: test !+0000010... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v!+0000010... FAIL (1 matches) +bisect: run: test v!+x002... FAIL (1 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test !-0000010... FAIL (89 matches) +bisect: run: test !-x002... FAIL (89 matches) bisect: target still fails; searching for more bad changes -bisect: run: test !+0-0000010... ok (44 matches) -bisect: run: test !+1-0000010... ok (45 matches) -bisect: run: test !+0+1-0000010... FAIL (44 matches) -bisect: run: test !+00+1-0000010... ok (23 matches) -bisect: run: test !+10+1-0000010... FAIL (21 matches) -bisect: run: test !+010+1-0000010... ok (10 matches) -bisect: run: test !+110+1-0000010... FAIL (11 matches) -bisect: run: test !+0110+1-0000010... FAIL (6 matches) -bisect: run: test !+00110+1-0000010... FAIL (3 matches) -bisect: run: test !+000110+1-0000010... FAIL (2 matches) -bisect: run: test !+0000110+1-0000010... FAIL (1 matches) -bisect: run: test !+1+0000110-0000010... FAIL (45 matches) -bisect: run: test !+01+0000110-0000010... ok (23 matches) -bisect: run: test !+11+0000110-0000010... FAIL (22 matches) -bisect: run: test !+011+0000110-0000010... FAIL (11 matches) -bisect: run: test !+0011+0000110-0000010... ok (6 matches) -bisect: run: test !+1011+0000110-0000010... 
FAIL (5 matches) -bisect: run: test !+01011+0000110-0000010... ok (3 matches) -bisect: run: test !+11011+0000110-0000010... FAIL (2 matches) -bisect: run: test !+011011+0000110-0000010... ok (1 matches) -bisect: run: test !+111011+0000110-0000010... FAIL (1 matches) +bisect: run: test !+0-x002... ok (44 matches) +bisect: run: test !+1-x002... ok (45 matches) +bisect: run: test !+0+1-x002... FAIL (44 matches) +bisect: run: test !+00+1-x002... ok (23 matches) +bisect: run: test !+10+1-x002... FAIL (21 matches) +bisect: run: test !+010+1-x002... ok (10 matches) +bisect: run: test !+110+1-x002... FAIL (11 matches) +bisect: run: test !+0110+1-x002... FAIL (6 matches) +bisect: run: test !+00110+1-x002... FAIL (3 matches) +bisect: run: test !+000110+1-x002... FAIL (2 matches) +bisect: run: test !+0000110+1-x002... FAIL (1 matches) +bisect: run: test !+1+x006-x002... FAIL (45 matches) +bisect: run: test !+01+x006-x002... ok (23 matches) +bisect: run: test !+11+x006-x002... FAIL (22 matches) +bisect: run: test !+011+x006-x002... FAIL (11 matches) +bisect: run: test !+0011+x006-x002... ok (6 matches) +bisect: run: test !+1011+x006-x002... FAIL (5 matches) +bisect: run: test !+01011+x006-x002... ok (3 matches) +bisect: run: test !+11011+x006-x002... FAIL (2 matches) +bisect: run: test !+011011+x006-x002... ok (1 matches) +bisect: run: test !+111011+x006-x002... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v!+0000110+111011-0000010... FAIL (2 matches) +bisect: run: test v!+x006+x03b-x002... FAIL (2 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test !-0000110-111011-0000010... ok (87 matches) +bisect: run: test !-x006-x03b-x002... ok (87 matches) bisect: target succeeds with all remaining changes disabled diff --git a/cmd/bisect/testdata/rand.txt b/cmd/bisect/testdata/rand.txt index 7e0e3320aa6..74c2659ed1a 100644 --- a/cmd/bisect/testdata/rand.txt +++ b/cmd/bisect/testdata/rand.txt @@ -22,38 +22,38 @@ bisect: run: test +00010... FAIL (3 matches) bisect: run: test +000010... FAIL (2 matches) bisect: run: test +0000010... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000010... FAIL (1 matches) +bisect: run: test v+x002... FAIL (1 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000010... FAIL (89 matches) +bisect: run: test -x002... FAIL (89 matches) bisect: target still fails; searching for more bad changes -bisect: run: test +0-0000010... FAIL (44 matches) -bisect: run: test +00-0000010... ok (23 matches) -bisect: run: test +10-0000010... FAIL (21 matches) -bisect: run: test +010-0000010... ok (10 matches) -bisect: run: test +110-0000010... FAIL (11 matches) -bisect: run: test +0110-0000010... FAIL (6 matches) -bisect: run: test +00110-0000010... FAIL (3 matches) -bisect: run: test +000110-0000010... FAIL (2 matches) -bisect: run: test +0000110-0000010... FAIL (1 matches) +bisect: run: test +0-x002... FAIL (44 matches) +bisect: run: test +00-x002... ok (23 matches) +bisect: run: test +10-x002... FAIL (21 matches) +bisect: run: test +010-x002... ok (10 matches) +bisect: run: test +110-x002... FAIL (11 matches) +bisect: run: test +0110-x002... FAIL (6 matches) +bisect: run: test +00110-x002... FAIL (3 matches) +bisect: run: test +000110-x002... FAIL (2 matches) +bisect: run: test +0000110-x002... FAIL (1 matches) bisect: confirming failing change set -bisect: run: test v+0000110-0000010... FAIL (1 matches) +bisect: run: test v+x006-x002... 
FAIL (1 matches) bisect: FOUND failing change set bisect: checking for more failures -bisect: run: test -0000110-0000010... FAIL (88 matches) +bisect: run: test -x006-x002... FAIL (88 matches) bisect: target still fails; searching for more bad changes -bisect: run: test +0-0000110-0000010... ok (43 matches) -bisect: run: test +1-0000110-0000010... FAIL (45 matches) -bisect: run: test +01-0000110-0000010... FAIL (23 matches) -bisect: run: test +001-0000110-0000010... ok (12 matches) -bisect: run: test +101-0000110-0000010... FAIL (11 matches) -bisect: run: test +0101-0000110-0000010... ok (6 matches) -bisect: run: test +1101-0000110-0000010... FAIL (5 matches) -bisect: run: test +01101-0000110-0000010... ok (3 matches) -bisect: run: test +11101-0000110-0000010... ok (2 matches) -bisect: run: test +01101+11101-0000110-0000010... FAIL (3 matches) -bisect: run: test +001101+11101-0000110-0000010... ok (2 matches) -bisect: run: test +101101+11101-0000110-0000010... ok (1 matches) -bisect: run: test +001101+11101+101101-0000110-0000010... ok (2 matches) +bisect: run: test +0-x006-x002... ok (43 matches) +bisect: run: test +1-x006-x002... FAIL (45 matches) +bisect: run: test +01-x006-x002... FAIL (23 matches) +bisect: run: test +001-x006-x002... ok (12 matches) +bisect: run: test +101-x006-x002... FAIL (11 matches) +bisect: run: test +0101-x006-x002... ok (6 matches) +bisect: run: test +1101-x006-x002... FAIL (5 matches) +bisect: run: test +01101-x006-x002... ok (3 matches) +bisect: run: test +11101-x006-x002... ok (2 matches) +bisect: run: test +01101+11101-x006-x002... FAIL (3 matches) +bisect: run: test +001101+11101-x006-x002... ok (2 matches) +bisect: run: test +101101+11101-x006-x002... ok (1 matches) +bisect: run: test +001101+11101+101101-x006-x002... 
ok (2 matches) bisect: fatal error: target fails inconsistently diff --git a/internal/bisect/bisect.go b/internal/bisect/bisect.go index 870af6c132e..50cf53b4b42 100644 --- a/internal/bisect/bisect.go +++ b/internal/bisect/bisect.go @@ -191,17 +191,35 @@ func New(pattern string) (*Matcher, error) { result := true bits := uint64(0) start := 0 + wid := 1 // 1-bit (binary); sometimes 4-bit (hex) for i := 0; i <= len(p); i++ { // Imagine a trailing - at the end of the pattern to flush final suffix c := byte('-') if i < len(p) { c = p[i] } + if i == start && wid == 1 && c == 'x' { // leading x for hex + start = i + 1 + wid = 4 + continue + } switch c { default: return nil, &parseError{"invalid pattern syntax: " + pattern} + case '2', '3', '4', '5', '6', '7', '8', '9': + if wid != 4 { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + fallthrough case '0', '1': - bits = bits<<1 | uint64(c-'0') + bits <<= wid + bits |= uint64(c - '0') + case 'a', 'b', 'c', 'd', 'e', 'f', 'A', 'B', 'C', 'D', 'E', 'F': + if wid != 4 { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + bits <<= 4 + bits |= uint64(c&^0x20 - 'A' + 10) case 'y': if i+1 < len(p) && (p[i+1] == '0' || p[i+1] == '1') { return nil, &parseError{"invalid pattern syntax: " + pattern} @@ -213,7 +231,7 @@ func New(pattern string) (*Matcher, error) { return nil, &parseError{"invalid pattern syntax (+ after -): " + pattern} } if i > 0 { - n := i - start + n := (i - start) * wid if n > 64 { return nil, &parseError{"pattern bits too long: " + pattern} } @@ -232,6 +250,7 @@ func New(pattern string) (*Matcher, error) { bits = 0 result = c == '+' start = i + 1 + wid = 1 } } return m, nil From 18186f0c6b44b14f5435c30346536c63acc752ad Mon Sep 17 00:00:00 2001 From: Jonathan Amsterdam Date: Wed, 10 May 2023 07:49:25 -0400 Subject: [PATCH 016/109] go/analysis/passes/slog: simplify function matching Simplify the identification of functions and methods that need to be checked by making them more explicit. Change-Id: I81efcb763613cf10de87902c6c3bbc929d35bae5 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494155 Run-TryBot: Jonathan Amsterdam TryBot-Result: Gopher Robot Reviewed-by: Tim King gopls-CI: kokoro --- go/analysis/passes/slog/slog.go | 75 ++++++++++++++++++--------------- 1 file changed, 40 insertions(+), 35 deletions(-) diff --git a/go/analysis/passes/slog/slog.go b/go/analysis/passes/slog/slog.go index cf9b77873c7..874ebecf069 100644 --- a/go/analysis/passes/slog/slog.go +++ b/go/analysis/passes/slog/slog.go @@ -172,50 +172,55 @@ func kvFuncSkipArgs(fn *types.Func) (int, bool) { if pkg := fn.Pkg(); pkg == nil || pkg.Path() != "log/slog" { return 0, false } + var recvName string // by default a slog package function recv := fn.Type().(*types.Signature).Recv() - if recv == nil { - if fn.Name() == "Group" { - return 0, true + if recv != nil { + t := recv.Type() + if pt, ok := t.(*types.Pointer); ok { + t = pt.Elem() } - skip, ok := slogOutputFuncs[fn.Name()] - return skip, ok - } - var recvName string - if pt, ok := recv.Type().(*types.Pointer); ok { - if nt, ok := pt.Elem().(*types.Named); ok { + if nt, ok := t.(*types.Named); !ok { + return 0, false + } else { recvName = nt.Obj().Name() } } - if recvName == "" { - return 0, false - } - // The methods on *Logger include all the top-level output methods, as well as "With". 
- if recvName == "Logger" { - if fn.Name() == "With" { - return 0, true - } - skip, ok := slogOutputFuncs[fn.Name()] - return skip, ok - } - if recvName == "Record" && fn.Name() == "Add" { - return 0, true - } - return 0, false + skip, ok := kvFuncs[recvName][fn.Name()] + return skip, ok } -// The names of top-level functions and *Logger methods in log/slog that take +// The names of functions and methods in log/slog that take // ...any for key-value pairs, mapped to the number of initial args to skip in // order to get to the ones that match the ...any parameter. -var slogOutputFuncs = map[string]int{ - "Debug": 1, - "Info": 1, - "Warn": 1, - "Error": 1, - "DebugCtx": 2, - "InfoCtx": 2, - "WarnCtx": 2, - "ErrorCtx": 2, - "Log": 3, +// The first key is the dereferenced receiver type name, or "" for a function. +var kvFuncs = map[string]map[string]int{ + "": map[string]int{ + "Debug": 1, + "Info": 1, + "Warn": 1, + "Error": 1, + "DebugCtx": 2, + "InfoCtx": 2, + "WarnCtx": 2, + "ErrorCtx": 2, + "Log": 3, + "Group": 0, + }, + "Logger": map[string]int{ + "Debug": 1, + "Info": 1, + "Warn": 1, + "Error": 1, + "DebugCtx": 2, + "InfoCtx": 2, + "WarnCtx": 2, + "ErrorCtx": 2, + "Log": 3, + "With": 0, + }, + "Record": map[string]int{ + "Add": 0, + }, } // isMethodExpr reports whether a call is to a MethodExpr. From abeba28e71b381b49a208f5c796b6bba8d01de25 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 11 May 2023 12:17:22 -0400 Subject: [PATCH 017/109] gopls/internal/regtest/marker: support proxy files in marker tests Add new functionality to wire-in a file-based GOPROXY value for marker tests containing files starting with "proxy/". Use this to port an example regression test, adding better coverage for go.work support while doing so. To solve one of the main pain points from working with proxy files in the regtests, add a -write_sumfile=dir1,dir2 flag to the test framework, to allow auto-generation of go.sum files. Along the way, loosen the diagnostic matching logic, ignoring end positions. Change-Id: I8421cea807fd87dcbe6b1619720a46b3f1f7bc3f Reviewed-on: https://go-review.googlesource.com/c/tools/+/494396 TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Alan Donovan Run-TryBot: Robert Findley --- gopls/internal/lsp/fake/sandbox.go | 5 +- gopls/internal/lsp/regtest/marker.go | 87 ++++++++++++++----- gopls/internal/lsp/regtest/wrappers.go | 6 +- .../regtest/diagnostics/diagnostics_test.go | 2 +- .../undeclared.txt | 0 .../testdata/quickfix/unusedrequire.txt | 24 +++++ .../quickfix/unusedrequire_gowork.txt | 49 +++++++++++ .../internal/regtest/misc/definition_test.go | 2 +- .../internal/regtest/misc/references_test.go | 2 +- .../internal/regtest/modfile/modfile_test.go | 42 --------- gopls/internal/regtest/watch/watch_test.go | 2 +- 11 files changed, 146 insertions(+), 75 deletions(-) rename gopls/internal/regtest/marker/testdata/{undeclaredname => quickfix}/undeclared.txt (100%) create mode 100644 gopls/internal/regtest/marker/testdata/quickfix/unusedrequire.txt create mode 100644 gopls/internal/regtest/marker/testdata/quickfix/unusedrequire_gowork.txt diff --git a/gopls/internal/lsp/fake/sandbox.go b/gopls/internal/lsp/fake/sandbox.go index a1557569bd7..7afdb99a818 100644 --- a/gopls/internal/lsp/fake/sandbox.go +++ b/gopls/internal/lsp/fake/sandbox.go @@ -254,10 +254,11 @@ func (sb *Sandbox) goCommandInvocation() gocommand.Invocation { // RunGoCommand executes a go command in the sandbox. 
If checkForFileChanges is // true, the sandbox scans the working directory and emits file change events // for any file changes it finds. -func (sb *Sandbox) RunGoCommand(ctx context.Context, dir, verb string, args []string, checkForFileChanges bool) error { +func (sb *Sandbox) RunGoCommand(ctx context.Context, dir, verb string, args, env []string, checkForFileChanges bool) error { inv := sb.goCommandInvocation() inv.Verb = verb inv.Args = args + inv.Env = append(inv.Env, env...) if dir != "" { inv.WorkingDir = sb.Workdir.AbsPath(dir) } @@ -289,7 +290,7 @@ func (sb *Sandbox) GoVersion(ctx context.Context) (int, error) { func (sb *Sandbox) Close() error { var goCleanErr error if sb.gopath != "" { - goCleanErr = sb.RunGoCommand(context.Background(), "", "clean", []string{"-modcache"}, false) + goCleanErr = sb.RunGoCommand(context.Background(), "", "clean", []string{"-modcache"}, nil, false) } err := robustio.RemoveAll(sb.rootdir) if err != nil || goCleanErr != nil { diff --git a/gopls/internal/lsp/regtest/marker.go b/gopls/internal/lsp/regtest/marker.go index a1d2d6c60ad..bbab4376b2e 100644 --- a/gopls/internal/lsp/regtest/marker.go +++ b/gopls/internal/lsp/regtest/marker.go @@ -97,12 +97,14 @@ var update = flag.Bool("update", false, "if set, update test data during marker // // # Special files // -// There are three types of file within the test archive that are given special +// There are several types of file within the test archive that are given special // treatment by the test runner: // - "flags": this file is treated as a whitespace-separated list of flags // that configure the MarkerTest instance. Supported flags: // -min_go=go1.18 sets the minimum Go version for the test; // -cgo requires that CGO_ENABLED is set and the cgo tool is available +// -write_sumfile=a,b,c instructs the test runner to generate go.sum files +// in these directories before running the test. // TODO(rfindley): support flag values containing whitespace. // - "settings.json": this file is parsed as JSON, and used as the // session configuration (see gopls/doc/settings.md) @@ -115,6 +117,9 @@ var update = flag.Bool("update", false, "if set, update test data during marker // Foo were of type *Golden, the test runner would convert the identifier a // in the call @foo(a, "b", 3) into a *Golden by collecting golden file // data starting with "@a/". +// - proxy files: any file starting with proxy/ is treated as a Go proxy +// file. If present, these files are written to a separate temporary +// directory and GOPROXY is set to file://. // // # Marker types // @@ -136,10 +141,8 @@ var update = flag.Bool("update", false, "if set, update test data during marker // a 1:1 correspondence between observed diagnostics and diag annotations. // The diagnostics source and kind fields are ignored, to reduce fuss. // -// The marker must accurately represent the diagnostic's range. -// Use grouping parens in the location regular expression to indicate -// a portion in context. -// TODO(adonovan): make this less strict, like the old framework. +// The specified location must match the start position of the diagnostic, +// but end positions are ignored. // // TODO(adonovan): in the older marker framework, the annotation asserted // two additional fields (source="compiler", kind="error"). Restore them? 
@@ -357,10 +360,17 @@ func RunMarkerTests(t *testing.T, dir string) { } config.Settings["diagnosticsDelay"] = "10ms" } - run := &markerTestRun{ - test: test, - env: newEnv(t, cache, test.files, config), + var writeGoSum []string + if test.writeGoSum != "" { + for _, d := range strings.Split(test.writeGoSum, ",") { + writeGoSum = append(writeGoSum, strings.TrimSpace(d)) + } + } + + run := &markerTestRun{ + test: test, + env: newEnv(t, cache, test.files, test.proxyFiles, writeGoSum, config), locations: make(map[expect.Identifier]protocol.Location), diags: make(map[protocol.Location][]protocol.Diagnostic), } @@ -397,8 +407,11 @@ func RunMarkerTests(t *testing.T, dir string) { uri := run.env.Sandbox.Workdir.URI(path) for _, diag := range params.Diagnostics { loc := protocol.Location{ - URI: uri, - Range: diag.Range, + URI: uri, + Range: protocol.Range{ + Start: diag.Range.Start, + End: diag.Range.Start, // ignore end positions + }, } run.diags[loc] = append(run.diags[loc], diag) } @@ -546,21 +559,23 @@ var markerFuncs = map[string]markerFunc{ // See the documentation for RunMarkerTests for more information on the archive // format. type markerTest struct { - name string // relative path to the txtar file in the testdata dir - fset *token.FileSet // fileset used for parsing notes - content []byte // raw test content - archive *txtar.Archive // original test archive - settings map[string]interface{} // gopls settings - env map[string]string // editor environment - files map[string][]byte // data files from the archive (excluding special files) - notes []*expect.Note // extracted notes from data files - golden map[string]*Golden // extracted golden content, by identifier name + name string // relative path to the txtar file in the testdata dir + fset *token.FileSet // fileset used for parsing notes + content []byte // raw test content + archive *txtar.Archive // original test archive + settings map[string]interface{} // gopls settings + env map[string]string // editor environment + proxyFiles map[string][]byte // proxy content + files map[string][]byte // data files from the archive (excluding special files) + notes []*expect.Note // extracted notes from data files + golden map[string]*Golden // extracted golden content, by identifier name // flags holds flags extracted from the special "flags" archive file. flags []string // Parsed flags values. 
minGoVersion string cgo bool + writeGoSum string // comma separated dirs to write go sum for } // flagSet returns the flagset used for parsing the special "flags" file in the @@ -569,6 +584,7 @@ func (t *markerTest) flagSet() *flag.FlagSet { flags := flag.NewFlagSet(t.name, flag.ContinueOnError) flags.StringVar(&t.minGoVersion, "min_go", "", "if set, the minimum go1.X version required for this test") flags.BoolVar(&t.cgo, "cgo", false, "if set, requires cgo (both the cgo tool and CGO_ENABLED=1)") + flags.StringVar(&t.writeGoSum, "write_sumfile", "", "if set, write the sumfile for these directories") return flags } @@ -711,6 +727,13 @@ func loadMarkerTest(name string, content []byte) (*markerTest, error) { } test.golden[id].data[name] = file.Data + case strings.HasPrefix(file.Name, "proxy/"): + name := file.Name[len("proxy/"):] + if test.proxyFiles == nil { + test.proxyFiles = make(map[string][]byte) + } + test.proxyFiles[name] = file.Data + default: // ordinary file content notes, err := expect.Parse(test.fset, file.Name, file.Data) if err != nil { @@ -773,6 +796,8 @@ func formatTest(test *markerTest) ([]byte, error) { default: if _, ok := test.files[file.Name]; ok { // ordinary file arch.Files = append(arch.Files, file) + } else if strings.HasPrefix(file.Name, "proxy/") { // proxy file + arch.Files = append(arch.Files, file) } else if data, ok := updatedGolden[file.Name]; ok { // golden file arch.Files = append(arch.Files, txtar.File{Name: file.Name, Data: data}) delete(updatedGolden, file.Name) @@ -798,16 +823,22 @@ func formatTest(test *markerTest) ([]byte, error) { // // TODO(rfindley): simplify and refactor the construction of testing // environments across regtests, marker tests, and benchmarks. -func newEnv(t *testing.T, cache *cache.Cache, files map[string][]byte, config fake.EditorConfig) *Env { +func newEnv(t *testing.T, cache *cache.Cache, files, proxyFiles map[string][]byte, writeGoSum []string, config fake.EditorConfig) *Env { sandbox, err := fake.NewSandbox(&fake.SandboxConfig{ - RootDir: t.TempDir(), - GOPROXY: "https://proxy.golang.org", - Files: files, + RootDir: t.TempDir(), + Files: files, + ProxyFiles: proxyFiles, }) if err != nil { t.Fatal(err) } + for _, dir := range writeGoSum { + if err := sandbox.RunGoCommand(context.Background(), dir, "list", []string{"-mod=mod", "..."}, []string{"GOWORK=off"}, true); err != nil { + t.Fatal(err) + } + } + // Put a debug instance in the context to prevent logging to stderr. // See associated TODO in runner.go: we should revisit this pattern. ctx := context.Background() @@ -851,7 +882,7 @@ type markerTestRun struct { // Collected information. // Each @diag/@suggestedfix marker eliminates an entry from diags. locations map[expect.Identifier]protocol.Location - diags map[protocol.Location][]protocol.Diagnostic + diags map[protocol.Location][]protocol.Diagnostic // diagnostics by position; location end == start } // sprintf returns a formatted string after applying pre-processing to @@ -1324,7 +1355,14 @@ func diagMarker(mark marker, loc protocol.Location, re *regexp.Regexp) { } } +// removeDiagnostic looks for a diagnostic matching loc at the given position. +// +// If found, it returns (diag, true), and eliminates the matched diagnostic +// from the unmatched set. +// +// If not found, it returns (protocol.Diagnostic{}, false). func removeDiagnostic(mark marker, loc protocol.Location, re *regexp.Regexp) (protocol.Diagnostic, bool) { + loc.Range.End = loc.Range.Start // diagnostics ignore end position. 
diags := mark.run.diags[loc] for i, diag := range diags { if re.MatchString(diag.Message) { @@ -1444,6 +1482,7 @@ func codeActionErrMarker(mark marker, actionKind string, start, end protocol.Loc // the expectation of a diagnostic, but then it applies the first code // action of the specified kind suggested by the matched diagnostic. func suggestedfixMarker(mark marker, loc protocol.Location, re *regexp.Regexp, actionKind string, golden *Golden) { + loc.Range.End = loc.Range.Start // diagnostics ignore end position. // Find and remove the matching diagnostic. diag, ok := removeDiagnostic(mark, loc, re) if !ok { diff --git a/gopls/internal/lsp/regtest/wrappers.go b/gopls/internal/lsp/regtest/wrappers.go index 0315c6de343..163960fa407 100644 --- a/gopls/internal/lsp/regtest/wrappers.go +++ b/gopls/internal/lsp/regtest/wrappers.go @@ -256,7 +256,7 @@ func (e *Env) RunGenerate(dir string) { // directory. func (e *Env) RunGoCommand(verb string, args ...string) { e.T.Helper() - if err := e.Sandbox.RunGoCommand(e.Ctx, "", verb, args, true); err != nil { + if err := e.Sandbox.RunGoCommand(e.Ctx, "", verb, args, nil, true); err != nil { e.T.Fatal(err) } } @@ -265,7 +265,7 @@ func (e *Env) RunGoCommand(verb string, args ...string) { // relative directory of the sandbox. func (e *Env) RunGoCommandInDir(dir, verb string, args ...string) { e.T.Helper() - if err := e.Sandbox.RunGoCommand(e.Ctx, dir, verb, args, true); err != nil { + if err := e.Sandbox.RunGoCommand(e.Ctx, dir, verb, args, nil, true); err != nil { e.T.Fatal(err) } } @@ -286,7 +286,7 @@ func (e *Env) GoVersion() int { func (e *Env) DumpGoSum(dir string) { e.T.Helper() - if err := e.Sandbox.RunGoCommand(e.Ctx, dir, "list", []string{"-mod=mod", "..."}, true); err != nil { + if err := e.Sandbox.RunGoCommand(e.Ctx, dir, "list", []string{"-mod=mod", "..."}, nil, true); err != nil { e.T.Fatal(err) } sumFile := path.Join(dir, "/go.sum") diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go index 3232114e83e..f8e59a0d0f6 100644 --- a/gopls/internal/regtest/diagnostics/diagnostics_test.go +++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go @@ -299,7 +299,7 @@ func Hello() { InitialWorkspaceLoad, Diagnostics(env.AtRegexp("main.go", `"mod.com/bob"`)), ) - if err := env.Sandbox.RunGoCommand(env.Ctx, "", "mod", []string{"init", "mod.com"}, true); err != nil { + if err := env.Sandbox.RunGoCommand(env.Ctx, "", "mod", []string{"init", "mod.com"}, nil, true); err != nil { t.Fatal(err) } env.AfterChange( diff --git a/gopls/internal/regtest/marker/testdata/undeclaredname/undeclared.txt b/gopls/internal/regtest/marker/testdata/quickfix/undeclared.txt similarity index 100% rename from gopls/internal/regtest/marker/testdata/undeclaredname/undeclared.txt rename to gopls/internal/regtest/marker/testdata/quickfix/undeclared.txt diff --git a/gopls/internal/regtest/marker/testdata/quickfix/unusedrequire.txt b/gopls/internal/regtest/marker/testdata/quickfix/unusedrequire.txt new file mode 100644 index 00000000000..6317b73f067 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/quickfix/unusedrequire.txt @@ -0,0 +1,24 @@ +This test checks the suggested fix to remove unused require statements from +go.mod files. 
+ +-- flags -- +-write_sumfile=a + +-- proxy/example.com@v1.0.0/x.go -- +package pkg +const X = 1 + +-- a/go.mod -- +module mod.com + +go 1.14 + +require example.com v1.0.0 //@suggestedfix("require", re"not used", "quickfix", a) + +-- @a/a/go.mod -- +module mod.com + +go 1.14 +-- a/main.go -- +package main +func main() {} diff --git a/gopls/internal/regtest/marker/testdata/quickfix/unusedrequire_gowork.txt b/gopls/internal/regtest/marker/testdata/quickfix/unusedrequire_gowork.txt new file mode 100644 index 00000000000..8a090d7fa48 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/quickfix/unusedrequire_gowork.txt @@ -0,0 +1,49 @@ +This test checks the suggested fix to remove unused require statements from +go.mod files, when a go.work file is used. + +Note that unlike unusedrequire.txt, we need not write go.sum files when +a go.work file is used. + +-- flags -- +-min_go=go1.18 + +-- proxy/example.com@v1.0.0/x.go -- +package pkg +const X = 1 + +-- go.work -- +go 1.21 + +use ( + ./a + ./b +) +-- a/go.mod -- +module mod.com/a + +go 1.14 + +require example.com v1.0.0 //@suggestedfix("require", re"not used", "quickfix", a) + +-- @a/a/go.mod -- +module mod.com/a + +go 1.14 +-- a/main.go -- +package main +func main() {} + +-- b/go.mod -- +module mod.com/b + +go 1.14 + +require example.com v1.0.0 //@suggestedfix("require", re"not used", "quickfix", b) + +-- @b/b/go.mod -- +module mod.com/b + +go 1.14 +-- b/main.go -- +package main +func main() {} diff --git a/gopls/internal/regtest/misc/definition_test.go b/gopls/internal/regtest/misc/definition_test.go index c2dd67fc3c4..9f24ef6d369 100644 --- a/gopls/internal/regtest/misc/definition_test.go +++ b/gopls/internal/regtest/misc/definition_test.go @@ -476,7 +476,7 @@ const _ = b.K } // Run 'go mod vendor' outside the editor. - if err := env.Sandbox.RunGoCommand(env.Ctx, ".", "mod", []string{"vendor"}, true); err != nil { + if err := env.Sandbox.RunGoCommand(env.Ctx, ".", "mod", []string{"vendor"}, nil, true); err != nil { t.Fatalf("go mod vendor: %v", err) } diff --git a/gopls/internal/regtest/misc/references_test.go b/gopls/internal/regtest/misc/references_test.go index ae9a7629528..cffbd60194a 100644 --- a/gopls/internal/regtest/misc/references_test.go +++ b/gopls/internal/regtest/misc/references_test.go @@ -419,7 +419,7 @@ var _ b.B checkVendor(env.Implementations(refLoc), false) // Run 'go mod vendor' outside the editor. 
- if err := env.Sandbox.RunGoCommand(env.Ctx, ".", "mod", []string{"vendor"}, true); err != nil { + if err := env.Sandbox.RunGoCommand(env.Ctx, ".", "mod", []string{"vendor"}, nil, true); err != nil { t.Fatalf("go mod vendor: %v", err) } diff --git a/gopls/internal/regtest/modfile/modfile_test.go b/gopls/internal/regtest/modfile/modfile_test.go index 3ac021b68b7..03e60ac80e7 100644 --- a/gopls/internal/regtest/modfile/modfile_test.go +++ b/gopls/internal/regtest/modfile/modfile_test.go @@ -336,48 +336,6 @@ require example.com v1.2.3 }) } -func TestUnusedDiag(t *testing.T) { - - const proxy = ` --- example.com@v1.0.0/x.go -- -package pkg -const X = 1 -` - const files = ` --- a/go.mod -- -module mod.com -go 1.14 -require example.com v1.0.0 --- a/go.sum -- -example.com v1.0.0 h1:38O7j5rEBajXk+Q5wzLbRN7KqMkSgEiN9NqcM1O2bBM= -example.com v1.0.0/go.mod h1:vUsPMGpx9ZXXzECCOsOmYCW7npJTwuA16yl89n3Mgls= --- a/main.go -- -package main -func main() {} -` - - const want = `module mod.com - -go 1.14 -` - - RunMultiple{ - {"default", WithOptions(ProxyFiles(proxy), WorkspaceFolders("a"))}, - {"nested", WithOptions(ProxyFiles(proxy))}, - }.Run(t, files, func(t *testing.T, env *Env) { - env.OpenFile("a/go.mod") - var d protocol.PublishDiagnosticsParams - env.AfterChange( - Diagnostics(env.AtRegexp("a/go.mod", `require example.com`)), - ReadDiagnostics("a/go.mod", &d), - ) - env.ApplyQuickFixes("a/go.mod", d.Diagnostics) - if got := env.BufferText("a/go.mod"); got != want { - t.Fatalf("unexpected go.mod content:\n%s", compare.Text(want, got)) - } - }) -} - // Test to reproduce golang/go#39041. It adds a new require to a go.mod file // that already has an unused require. func TestNewDepWithUnusedDep(t *testing.T) { diff --git a/gopls/internal/regtest/watch/watch_test.go b/gopls/internal/regtest/watch/watch_test.go index 8fea96e6bfd..f485b7447f5 100644 --- a/gopls/internal/regtest/watch/watch_test.go +++ b/gopls/internal/regtest/watch/watch_test.go @@ -577,7 +577,7 @@ func main() { env.AfterChange( NoDiagnostics(ForFile("main.go")), ) - if err := env.Sandbox.RunGoCommand(env.Ctx, "", "mod", []string{"init", "mod.com"}, true); err != nil { + if err := env.Sandbox.RunGoCommand(env.Ctx, "", "mod", []string{"init", "mod.com"}, nil, true); err != nil { t.Fatal(err) } From cd39d2be4fce96e8f75eab1708b5040975b35b8c Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 11 May 2023 13:05:03 -0400 Subject: [PATCH 018/109] internal/lsp/cache: support loading multiple orphaned files go/packages returns at most one command-line-arguments package per query. Previously, this led to N^2 loading of orphaned files. CL 480197 fixed this by marking all queried files as unloadable after a load completes. However, as a result gopls would fail to load multiple orphaned files. Fix this properly by calling Load once for each orphaned file. 
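
In outline, the new logic in reloadOrphanedOpenFiles issues one bounded
file= load per orphaned file. This is a simplified sketch of the
snapshot.go change below; the unloadable-file bookkeeping and most error
handling are elided:

	var g errgroup.Group
	g.SetLimit(runtime.GOMAXPROCS(0)) // bound concurrent go/packages invocations
	for _, file := range files {
		file := file // capture the loop variable for the closure
		g.Go(func() error {
			// Files without a valid package clause cannot be loaded; skip them.
			pgf, err := s.ParseGo(ctx, file, source.ParseHeader)
			if err != nil || !pgf.File.Package.IsValid() {
				return nil
			}
			// One file= query per orphaned file.
			return s.load(ctx, false, fileLoadScope(file.URI()))
		})
	}
	if err := g.Wait(); err != nil {
		// Context cancellation and errNoPackages are handled as before.
	}
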
Fixes golang/go#59318 Change-Id: Ibfb3742fcb70ea3976d8b1b5b384fe6b97350cf4 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494401 Reviewed-by: Alan Donovan gopls-CI: kokoro Run-TryBot: Robert Findley TryBot-Result: Gopher Robot --- gopls/internal/lsp/cache/load.go | 12 ++-- gopls/internal/lsp/cache/snapshot.go | 66 +++++++++++-------- .../marker/testdata/fixedbugs/issue59318.txt | 22 +++++++ 3 files changed, 64 insertions(+), 36 deletions(-) create mode 100644 gopls/internal/regtest/marker/testdata/fixedbugs/issue59318.txt diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go index ade21ea2857..d932c953cb6 100644 --- a/gopls/internal/lsp/cache/load.go +++ b/gopls/internal/lsp/cache/load.go @@ -451,14 +451,12 @@ func buildMetadata(updates map[PackageID]*source.Metadata, pkg *packages.Package pkgPath := PackagePath(pkg.PkgPath) id := PackageID(pkg.ID) - // TODO(rfindley): this creates at most one command-line-arguments package - // per load, but if we pass multiple file= queries to go/packages, there may - // be multiple command-line-arguments packages. - // - // As reported in golang/go#59318, this can result in accidentally quadratic - // loading behavior. if source.IsCommandLineArguments(id) { - suffix := ":" + strings.Join(query, ",") + if len(pkg.CompiledGoFiles) != 1 { + bug.Reportf("unexpected files in command-line-arguments package: %v", pkg.CompiledGoFiles) + return + } + suffix := pkg.CompiledGoFiles[0] id = PackageID(pkg.ID + suffix) pkgPath = PackagePath(pkg.PkgPath + suffix) } diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index 7e9a06f5cea..71f65630e0d 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -1595,28 +1595,34 @@ func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error { // available in the snapshot and reload their metadata individually using a // file= query if the metadata is unavailable. files := s.orphanedOpenFiles() + if len(files) == 0 { + return nil + } - // Files without a valid package declaration can't be loaded. Don't try. - var scopes []loadScope + var uris []span.URI for _, file := range files { - pgf, err := s.ParseGo(ctx, file, source.ParseHeader) - if err != nil { - continue - } - if !pgf.File.Package.IsValid() { - continue - } - - scopes = append(scopes, fileLoadScope(file.URI())) + uris = append(uris, file.URI()) } - if len(scopes) == 0 { - return nil - } + event.Log(ctx, "reloadOrphanedFiles reloading", tag.Files.Of(uris)) - // The regtests match this exact log message, keep them in sync. - event.Log(ctx, "reloadOrphanedFiles reloading", tag.Query.Of(scopes)) - err := s.load(ctx, false, scopes...) + var g errgroup.Group + + cpulimit := runtime.GOMAXPROCS(0) + g.SetLimit(cpulimit) + + // Load files one-at-a-time. go/packages can return at most one + // command-line-arguments package per query. + for _, file := range files { + file := file + g.Go(func() error { + pgf, err := s.ParseGo(ctx, file, source.ParseHeader) + if err != nil || !pgf.File.Package.IsValid() { + return nil // need a valid header + } + return s.load(ctx, false, fileLoadScope(file.URI())) + }) + } // If we failed to load some files, i.e. they have no metadata, // mark the failures so we don't bother retrying until the file's @@ -1624,11 +1630,17 @@ func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error { // // TODO(rfindley): is it possible that the load stopped early for an // unrelated errors? If so, add a fallback? 
- // - // Check for context cancellation so that we don't incorrectly mark files - // as unloadable, but don't return before setting all workspace packages. - if ctx.Err() != nil { - return ctx.Err() + + if err := g.Wait(); err != nil { + // Check for context cancellation so that we don't incorrectly mark files + // as unloadable, but don't return before setting all workspace packages. + if ctx.Err() != nil { + return ctx.Err() + } + + if !errors.Is(err, errNoPackages) { + event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Files.Of(uris)) + } } // If the context was not canceled, we assume that the result of loading @@ -1637,19 +1649,15 @@ func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error { // prevents us from falling into recursive reloading where we only make a bit // of progress each time. s.mu.Lock() - for _, scope := range scopes { + defer s.mu.Unlock() + for _, file := range files { // TODO(rfindley): instead of locking here, we should have load return the // metadata graph that resulted from loading. - uri := span.URI(scope.(fileLoadScope)) + uri := file.URI() if s.noValidMetadataForURILocked(uri) { s.unloadableFiles[uri] = struct{}{} } } - s.mu.Unlock() - - if err != nil && !errors.Is(err, errNoPackages) { - event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Query.Of(scopes)) - } return nil } diff --git a/gopls/internal/regtest/marker/testdata/fixedbugs/issue59318.txt b/gopls/internal/regtest/marker/testdata/fixedbugs/issue59318.txt new file mode 100644 index 00000000000..65385f703e5 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/fixedbugs/issue59318.txt @@ -0,0 +1,22 @@ +This test verifies that we can load multiple orphaned files as +command-line-arguments packages. + +Previously, we would load only one because go/packages returns at most one +command-line-arguments package per query. + +-- a/main.go -- +package main + +func main() { + var a int //@diag(re"var (a)", re"not used") +} +-- b/main.go -- +package main + +func main() { + var b int //@diag(re"var (b)", re"not used") +} +-- c/go.mod -- +module c.com // The existence of this module avoids a workspace error. + +go 1.18 From 01128f9fbc93e71b7c7c08087eabe6be3850c54c Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 11 May 2023 16:40:11 -0400 Subject: [PATCH 019/109] gopls/internal/lsp/source: fix bug renaming field to unexported The logic in rename that detects whether renaming an exported identifier (to an unexported name) would break existing references from other packages was only executed in the declaring package, where it is of course never needed. This change causes it to run unconditionally. Conceptually, this CL replaces a return with a forward goto, but the indentation makes the diff messier. 
Fixes golang/go#59403 Change-Id: Iad0963ce80bf1412481199acc1fcc238ad91225e Reviewed-on: https://go-review.googlesource.com/c/tools/+/494440 TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan Auto-Submit: Alan Donovan Reviewed-by: Robert Findley --- gopls/internal/lsp/source/rename_check.go | 92 +++++++++---------- .../marker/testdata/rename/unexported.txt | 25 +++++ 2 files changed, 71 insertions(+), 46 deletions(-) create mode 100644 gopls/internal/regtest/marker/testdata/rename/unexported.txt diff --git a/gopls/internal/lsp/source/rename_check.go b/gopls/internal/lsp/source/rename_check.go index a858bb7faaf..7affb7675c5 100644 --- a/gopls/internal/lsp/source/rename_check.go +++ b/gopls/internal/lsp/source/rename_check.go @@ -398,63 +398,63 @@ func (r *renamer) checkLabel(label *types.Label) { // checkStructField checks that the field renaming will not cause // conflicts at its declaration, or ambiguity or changes to any selection. func (r *renamer) checkStructField(from *types.Var) { - // Check that the struct declaration is free of field conflicts, - // and field/method conflicts. + // If this is the declaring package, check that the struct + // declaration is free of field conflicts, and field/method + // conflicts. + // // go/types offers no easy way to get from a field (or interface // method) to its declaring struct (or interface), so we must // ascend the AST. - pgf, ok := enclosingFile(r.pkg, from.Pos()) - if !ok { - return // not declared by syntax of this package - } - path, _ := astutil.PathEnclosingInterval(pgf.File, from.Pos(), from.Pos()) - // path matches this pattern: - // [Ident SelectorExpr? StarExpr? Field FieldList StructType ParenExpr* ... File] - - // Ascend to FieldList. - var i int - for { - if _, ok := path[i].(*ast.FieldList); ok { - break + if pgf, ok := enclosingFile(r.pkg, from.Pos()); ok { + path, _ := astutil.PathEnclosingInterval(pgf.File, from.Pos(), from.Pos()) + // path matches this pattern: + // [Ident SelectorExpr? StarExpr? Field FieldList StructType ParenExpr* ... File] + + // Ascend to FieldList. + var i int + for { + if _, ok := path[i].(*ast.FieldList); ok { + break + } + i++ } i++ - } - i++ - tStruct := path[i].(*ast.StructType) - i++ - // Ascend past parens (unlikely). - for { - _, ok := path[i].(*ast.ParenExpr) - if !ok { - break - } + tStruct := path[i].(*ast.StructType) i++ - } - if spec, ok := path[i].(*ast.TypeSpec); ok { - // This struct is also a named type. - // We must check for direct (non-promoted) field/field - // and method/field conflicts. - named := r.pkg.GetTypesInfo().Defs[spec.Name].Type() - prev, indices, _ := types.LookupFieldOrMethod(named, true, r.pkg.GetTypes(), r.to) - if len(indices) == 1 { - r.errorf(from.Pos(), "renaming this field %q to %q", - from.Name(), r.to) - r.errorf(prev.Pos(), "\twould conflict with this %s", - objectKind(prev)) - return // skip checkSelections to avoid redundant errors - } - } else { - // This struct is not a named type. - // We need only check for direct (non-promoted) field/field conflicts. - T := r.pkg.GetTypesInfo().Types[tStruct].Type.Underlying().(*types.Struct) - for i := 0; i < T.NumFields(); i++ { - if prev := T.Field(i); prev.Name() == r.to { + // Ascend past parens (unlikely). + for { + _, ok := path[i].(*ast.ParenExpr) + if !ok { + break + } + i++ + } + if spec, ok := path[i].(*ast.TypeSpec); ok { + // This struct is also a named type. + // We must check for direct (non-promoted) field/field + // and method/field conflicts. 
+ named := r.pkg.GetTypesInfo().Defs[spec.Name].Type() + prev, indices, _ := types.LookupFieldOrMethod(named, true, r.pkg.GetTypes(), r.to) + if len(indices) == 1 { r.errorf(from.Pos(), "renaming this field %q to %q", from.Name(), r.to) - r.errorf(prev.Pos(), "\twould conflict with this field") + r.errorf(prev.Pos(), "\twould conflict with this %s", + objectKind(prev)) return // skip checkSelections to avoid redundant errors } + } else { + // This struct is not a named type. + // We need only check for direct (non-promoted) field/field conflicts. + T := r.pkg.GetTypesInfo().Types[tStruct].Type.Underlying().(*types.Struct) + for i := 0; i < T.NumFields(); i++ { + if prev := T.Field(i); prev.Name() == r.to { + r.errorf(from.Pos(), "renaming this field %q to %q", + from.Name(), r.to) + r.errorf(prev.Pos(), "\twould conflict with this field") + return // skip checkSelections to avoid redundant errors + } + } } } diff --git a/gopls/internal/regtest/marker/testdata/rename/unexported.txt b/gopls/internal/regtest/marker/testdata/rename/unexported.txt new file mode 100644 index 00000000000..e5631fa4907 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/rename/unexported.txt @@ -0,0 +1,25 @@ + +This test attempts to rename a.S.X to x, which would make it +inaccessible from its external test package. The rename tool +should report an error rather than wrecking the program. +See issue #59403. + +-- go.mod -- +module example.com +go 1.12 + +-- a/a.go -- +package a + +var S struct{ X int } //@renameerr("X", x, oops) + +-- a/a_test.go -- +package a_test + +import "example.com/a" + +var Y = a.S.X + +-- @oops -- +a/a.go:3:15: renaming "X" to "x" would make it unexported +a/a_test.go:5:13: breaking references from packages such as "example.com/a_test" From ad74ff6345e3663a8f1a4ba5c6e85d54a6fd5615 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 11 May 2023 18:07:57 -0400 Subject: [PATCH 020/109] go/gcexportdata: drop support for the ancient binary format Compilers and tools haven't produced it since go1.11, several years ago now. 
Change-Id: I5056c5bba81030a2eba5e3931190b8249524aed7 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494442 gopls-CI: kokoro Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Matthew Dempsky Reviewed-by: Robert Findley --- go/gcexportdata/gcexportdata.go | 11 +- internal/gcimporter/bexport.go | 852 ---------------- internal/gcimporter/bexport_test.go | 184 +--- internal/gcimporter/bimport.go | 907 +----------------- internal/gcimporter/gcimporter.go | 11 +- internal/gcimporter/iexport.go | 19 + .../testdata/versions/test_go1.11_0i.a | Bin 2420 -> 0 bytes .../testdata/versions/test_go1.11_6b.a | Bin 2426 -> 0 bytes .../testdata/versions/test_go1.11_999b.a | Bin 2600 -> 0 bytes .../testdata/versions/test_go1.11_999i.a | Bin 2420 -> 0 bytes .../testdata/versions/test_go1.7_0.a | Bin 1862 -> 0 bytes .../testdata/versions/test_go1.7_1.a | Bin 2316 -> 0 bytes .../testdata/versions/test_go1.8_4.a | Bin 1658 -> 0 bytes .../testdata/versions/test_go1.8_5.a | Bin 1658 -> 0 bytes 14 files changed, 36 insertions(+), 1948 deletions(-) delete mode 100644 internal/gcimporter/bexport.go delete mode 100644 internal/gcimporter/testdata/versions/test_go1.11_0i.a delete mode 100644 internal/gcimporter/testdata/versions/test_go1.11_6b.a delete mode 100644 internal/gcimporter/testdata/versions/test_go1.11_999b.a delete mode 100644 internal/gcimporter/testdata/versions/test_go1.11_999i.a delete mode 100644 internal/gcimporter/testdata/versions/test_go1.7_0.a delete mode 100644 internal/gcimporter/testdata/versions/test_go1.7_1.a delete mode 100644 internal/gcimporter/testdata/versions/test_go1.8_4.a delete mode 100644 internal/gcimporter/testdata/versions/test_go1.8_5.a diff --git a/go/gcexportdata/gcexportdata.go b/go/gcexportdata/gcexportdata.go index 165ede0f8f3..03543bd4bb8 100644 --- a/go/gcexportdata/gcexportdata.go +++ b/go/gcexportdata/gcexportdata.go @@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/internal/gcimporter/bexport.go b/internal/gcimporter/bexport.go deleted file mode 100644 index 30582ed6d3d..00000000000 --- a/internal/gcimporter/bexport.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "math" - "math/big" - "sort" - "strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. 
This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// Current export format version. Increase with each format change. -// -// Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. -// -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. -// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with with the textual export format either). -// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { - fset *token.FileSet - out bytes.Buffer - - // object -> index maps, indexed in order of serialization - strIndex map[string]int - pkgIndex map[*types.Package]int - typIndex map[types.Type]int - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - - // debugging support - written int // bytes written - indent int // for trace -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} - -// BExportData returns binary export data for pkg. -// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := exporter{ - fset: fset, - strIndex: map[string]int{"": 0}, // empty string is mapped to 0 - pkgIndex: make(map[*types.Package]int), - typIndex: make(map[types.Type]int), - posInfoFormat: true, // TODO(gri) might become a flag, eventually - } - - // write version info - // The version string must start with "version %d" where %d is the version - // number. Additional debugging information may follow after a blank; that - // text is ignored by the importer. 
- p.rawStringln(fmt.Sprintf("version %d", exportVersion)) - var debug string - if debugFormat { - debug = "debug" - } - p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly - p.bool(trackAllTypes) - p.bool(p.posInfoFormat) - - // --- generic export data --- - - // populate type map with predeclared "known" types - for index, typ := range predeclared() { - p.typIndex[typ] = index - } - if len(p.typIndex) != len(predeclared()) { - return nil, internalError("duplicate entries in type map?") - } - - // write package data - p.pkg(pkg, true) - if trace { - p.tracef("\n") - } - - // write objects - objcount := 0 - scope := pkg.Scope() - for _, name := range scope.Names() { - if !token.IsExported(name) { - continue - } - if trace { - p.tracef("\n") - } - p.obj(scope.Lookup(name)) - objcount++ - } - - // indicate end of list - if trace { - p.tracef("\n") - } - p.tag(endTag) - - // for self-verification only (redundant) - p.int(objcount) - - if trace { - p.tracef("\n") - } - - // --- end of export data --- - - return p.out.Bytes(), nil -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { - if pkg == nil { - panic(internalError("unexpected nil pkg")) - } - - // if we saw the package before, write its index (>= 0) - if i, ok := p.pkgIndex[pkg]; ok { - p.index('P', i) - return - } - - // otherwise, remember the package, write the package tag (< 0) and package data - if trace { - p.tracef("P%d = { ", len(p.pkgIndex)) - defer p.tracef("} ") - } - p.pkgIndex[pkg] = len(p.pkgIndex) - - p.tag(packageTag) - p.string(pkg.Name()) - if emptypath { - p.string("") - } else { - p.string(pkg.Path()) - } -} - -func (p *exporter) obj(obj types.Object) { - switch obj := obj.(type) { - case *types.Const: - p.tag(constTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - p.value(obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - p.tag(aliasTag) - p.pos(obj) - p.qualifiedName(obj) - } else { - p.tag(typeTag) - } - p.typ(obj.Type()) - - case *types.Var: - p.tag(varTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - - case *types.Func: - p.tag(funcTag) - p.pos(obj) - p.qualifiedName(obj) - sig := obj.Type().(*types.Signature) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - - default: - panic(internalErrorf("unexpected object %v (%T)", obj, obj)) - } -} - -func (p *exporter) pos(obj types.Object) { - if !p.posInfoFormat { - return - } - - file, line := p.fileLine(obj) - if file == p.prevFile { - // common case: write line delta - // delta == 0 means different file or no line change - delta := line - p.prevLine - p.int(delta) - if delta == 0 { - p.int(-1) // -1 means no file change - } - } else { - // different file - p.int(0) - // Encode filename as length of common prefix with previous - // filename, followed by (possibly empty) suffix. Filenames - // frequently share path prefixes, so this can save a lot - // of space and make export data size less dependent on file - // path length. The suffix is unlikely to be empty because - // file names tend to end in ".go". 
- n := commonPrefixLen(p.prevFile, file) - p.int(n) // n >= 0 - p.string(file[n:]) // write suffix only - p.prevFile = file - p.int(line) - } - p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { - if p.fset != nil { - pos := p.fset.Position(obj.Pos()) - file = pos.Filename - line = pos.Line - } - return -} - -func commonPrefixLen(a, b string) int { - if len(a) > len(b) { - a, b = b, a - } - // len(a) <= len(b) - i := 0 - for i < len(a) && a[i] == b[i] { - i++ - } - return i -} - -func (p *exporter) qualifiedName(obj types.Object) { - p.string(obj.Name()) - p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { - if t == nil { - panic(internalError("nil type")) - } - - // Possible optimization: Anonymous pointer types *T where - // T is a named type are common. We could canonicalize all - // such types *T to a single type PT = *T. This would lead - // to at most one *T entry in typIndex, and all future *T's - // would be encoded as the respective index directly. Would - // save 1 byte (pointerTag) per *T and reduce the typIndex - // size (at the cost of a canonicalization map). We can do - // this later, without encoding format change. - - // if we saw the type before, write its index (>= 0) - if i, ok := p.typIndex[t]; ok { - p.index('T', i) - return - } - - // otherwise, remember the type, write the type tag (< 0) and type data - if trackAllTypes { - if trace { - p.tracef("T%d = {>\n", len(p.typIndex)) - defer p.tracef("<\n} ") - } - p.typIndex[t] = len(p.typIndex) - } - - switch t := t.(type) { - case *types.Named: - if !trackAllTypes { - // if we don't track all types, track named types now - p.typIndex[t] = len(p.typIndex) - } - - p.tag(namedTag) - p.pos(t.Obj()) - p.qualifiedName(t.Obj()) - p.typ(t.Underlying()) - if !types.IsInterface(t) { - p.assocMethods(t) - } - - case *types.Array: - p.tag(arrayTag) - p.int64(t.Len()) - p.typ(t.Elem()) - - case *types.Slice: - p.tag(sliceTag) - p.typ(t.Elem()) - - case *dddSlice: - p.tag(dddTag) - p.typ(t.elem) - - case *types.Struct: - p.tag(structTag) - p.fieldList(t) - - case *types.Pointer: - p.tag(pointerTag) - p.typ(t.Elem()) - - case *types.Signature: - p.tag(signatureTag) - p.paramList(t.Params(), t.Variadic()) - p.paramList(t.Results(), false) - - case *types.Interface: - p.tag(interfaceTag) - p.iface(t) - - case *types.Map: - p.tag(mapTag) - p.typ(t.Key()) - p.typ(t.Elem()) - - case *types.Chan: - p.tag(chanTag) - p.int(int(3 - t.Dir())) // hack - p.typ(t.Elem()) - - default: - panic(internalErrorf("unexpected type %T: %s", t, t)) - } -} - -func (p *exporter) assocMethods(named *types.Named) { - // Sort methods (for determinism). 
- var methods []*types.Func - for i := 0; i < named.NumMethods(); i++ { - methods = append(methods, named.Method(i)) - } - sort.Sort(methodsByName(methods)) - - p.int(len(methods)) - - if trace && methods != nil { - p.tracef("associated methods {>\n") - } - - for i, m := range methods { - if trace && i > 0 { - p.tracef("\n") - } - - p.pos(m) - name := m.Name() - p.string(name) - if !exported(name) { - p.pkg(m.Pkg(), false) - } - - sig := m.Type().(*types.Signature) - p.paramList(types.NewTuple(sig.Recv()), false) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - p.int(0) // dummy value for go:nointerface pragma - ignored by importer - } - - if trace && methods != nil { - p.tracef("<\n} ") - } -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int { return len(x) } -func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { - if trace && t.NumFields() > 0 { - p.tracef("fields {>\n") - defer p.tracef("<\n} ") - } - - p.int(t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.field(t.Field(i)) - p.string(t.Tag(i)) - } -} - -func (p *exporter) field(f *types.Var) { - if !f.IsField() { - panic(internalError("field expected")) - } - - p.pos(f) - p.fieldName(f) - p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { - // TODO(gri): enable importer to load embedded interfaces, - // then emit Embeddeds and ExplicitMethods separately here. - p.int(0) - - n := t.NumMethods() - if trace && n > 0 { - p.tracef("methods {>\n") - defer p.tracef("<\n} ") - } - p.int(n) - for i := 0; i < n; i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.method(t.Method(i)) - } -} - -func (p *exporter) method(m *types.Func) { - sig := m.Type().(*types.Signature) - if sig.Recv() == nil { - panic(internalError("method expected")) - } - - p.pos(m) - p.string(m.Name()) - if m.Name() != "_" && !token.IsExported(m.Name()) { - p.pkg(m.Pkg(), false) - } - - // interface method; no need to encode receiver. - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { - name := f.Name() - - if f.Anonymous() { - // anonymous field - we distinguish between 3 cases: - // 1) field name matches base type name and is exported - // 2) field name matches base type name and is not exported - // 3) field name doesn't match base type name (alias name) - bname := basetypeName(f.Type()) - if name == bname { - if token.IsExported(name) { - name = "" // 1) we don't need to know the field name or package - } else { - name = "?" // 2) use unexported name "?" 
to force package export - } - } else { - // 3) indicate alias and export name as is - // (this requires an extra "@" but this is a rare case) - p.string("@") - } - } - - p.string(name) - if name != "" && !token.IsExported(name) { - p.pkg(f.Pkg(), false) - } -} - -func basetypeName(typ types.Type) string { - switch typ := deref(typ).(type) { - case *types.Basic: - return typ.Name() - case *types.Named: - return typ.Obj().Name() - default: - return "" // unnamed type - } -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { - // use negative length to indicate unnamed parameters - // (look at the first parameter only since either all - // names are present or all are absent) - n := params.Len() - if n > 0 && params.At(0).Name() == "" { - n = -n - } - p.int(n) - for i := 0; i < params.Len(); i++ { - q := params.At(i) - t := q.Type() - if variadic && i == params.Len()-1 { - t = &dddSlice{t.(*types.Slice).Elem()} - } - p.typ(t) - if n > 0 { - name := q.Name() - p.string(name) - if name != "_" { - p.pkg(q.Pkg(), false) - } - } - p.string("") // no compiler-specific info - } -} - -func (p *exporter) value(x constant.Value) { - if trace { - p.tracef("= ") - } - - switch x.Kind() { - case constant.Bool: - tag := falseTag - if constant.BoolVal(x) { - tag = trueTag - } - p.tag(tag) - - case constant.Int: - if v, exact := constant.Int64Val(x); exact { - // common case: x fits into an int64 - use compact encoding - p.tag(int64Tag) - p.int64(v) - return - } - // uncommon case: large x - use float encoding - // (powers of 2 will be encoded efficiently with exponent) - p.tag(floatTag) - p.float(constant.ToFloat(x)) - - case constant.Float: - p.tag(floatTag) - p.float(x) - - case constant.Complex: - p.tag(complexTag) - p.float(constant.Real(x)) - p.float(constant.Imag(x)) - - case constant.String: - p.tag(stringTag) - p.string(constant.StringVal(x)) - - case constant.Unknown: - // package contains type errors - p.tag(unknownTag) - - default: - panic(internalErrorf("unexpected value %v (%T)", x, x)) - } -} - -func (p *exporter) float(x constant.Value) { - if x.Kind() != constant.Float { - panic(internalErrorf("unexpected constant %v, want float", x)) - } - // extract sign (there is no -0) - sign := constant.Sign(x) - if sign == 0 { - // x == 0 - p.int(0) - return - } - // x != 0 - - var f big.Float - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - r := valueToRat(num) - f.SetRat(r.Quo(r, valueToRat(denom))) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - f.SetFloat64(math.MaxFloat64) // FIXME - } - - // extract exponent such that 0.5 <= m < 1.0 - var m big.Float - exp := f.MantExp(&m) - - // extract mantissa as *big.Int - // - set exponent large enough so mant satisfies mant.IsInt() - // - get *big.Int from mant - m.SetMantExp(&m, int(m.MinPrec())) - mant, acc := m.Int(nil) - if acc != big.Exact { - panic(internalError("internal error")) - } - - p.int(sign) - p.int(exp) - p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. 
- bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { - if trace { - p.tracef("[") - defer p.tracef("= %v] ", b) - } - - x := 0 - if b { - x = 1 - } - p.int(x) - return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { - if index < 0 { - panic(internalError("invalid index < 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%c%d ", marker, index) - } - p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { - if tag >= 0 { - panic(internalError("invalid tag >= 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%s ", tagString[-tag]) - } - p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { - p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { - if debugFormat { - p.marker('i') - } - if trace { - p.tracef("%d ", x) - } - p.rawInt64(x) -} - -func (p *exporter) string(s string) { - if debugFormat { - p.marker('s') - } - if trace { - p.tracef("%q ", s) - } - // if we saw the string before, write its index (>= 0) - // (the empty string is mapped to 0) - if i, ok := p.strIndex[s]; ok { - p.rawInt64(int64(i)) - return - } - // otherwise, remember string and write its negative length and bytes - p.strIndex[s] = len(p.strIndex) - p.rawInt64(-int64(len(s))) - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { - p.rawByte(m) - // Enable this for help tracking down the location - // of an incorrect marker when running in debugFormat. - if false && trace { - p.tracef("#%d ", p.written) - } - p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { - var tmp [binary.MaxVarintLen64]byte - n := binary.PutVarint(tmp[:], x) - for i := 0; i < n; i++ { - p.rawByte(tmp[i]) - } -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } - p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. -// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -// '$' => '|' 'S' -// '|' => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { - switch b { - case '$': - // write '$' as '|' 'S' - b = 'S' - fallthrough - case '|': - // write '|' as '|' '|' - p.out.WriteByte('|') - p.written++ - } - p.out.WriteByte(b) - p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. -func (p *exporter) tracef(format string, args ...interface{}) { - if strings.ContainsAny(format, "<>\n") { - var buf bytes.Buffer - for i := 0; i < len(format); i++ { - // no need to deal with runes - ch := format[i] - switch ch { - case '>': - p.indent++ - continue - case '<': - p.indent-- - continue - } - buf.WriteByte(ch) - if ch == '\n' { - for j := p.indent; j > 0; j-- { - buf.WriteString(". 
") - } - } - } - format = buf.String() - } - fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ - // Packages - -packageTag: "package", - - // Types - -namedTag: "named type", - -arrayTag: "array", - -sliceTag: "slice", - -dddTag: "ddd", - -structTag: "struct", - -pointerTag: "pointer", - -signatureTag: "signature", - -interfaceTag: "interface", - -mapTag: "map", - -chanTag: "chan", - - // Values - -falseTag: "false", - -trueTag: "true", - -int64Tag: "int64", - -floatTag: "float", - -fractionTag: "fraction", - -complexTag: "complex", - -stringTag: "string", - -unknownTag: "unknown", - - // Type aliases - -aliasTag: "alias", -} diff --git a/internal/gcimporter/bexport_test.go b/internal/gcimporter/bexport_test.go index bc2390c880c..978c46e1932 100644 --- a/internal/gcimporter/bexport_test.go +++ b/internal/gcimporter/bexport_test.go @@ -5,10 +5,9 @@ package gcimporter_test import ( + "bytes" "fmt" "go/ast" - "go/build" - "go/constant" "go/parser" "go/token" "go/types" @@ -19,157 +18,18 @@ import ( "strings" "testing" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/go/buildutil" - "golang.org/x/tools/go/loader" "golang.org/x/tools/internal/gcimporter" - "golang.org/x/tools/internal/testenv" "golang.org/x/tools/internal/typeparams" - "golang.org/x/tools/internal/typeparams/genericfeatures" ) var isRace = false -func TestBExportData_stdlib(t *testing.T) { - if runtime.Compiler == "gccgo" { - t.Skip("gccgo standard library is inaccessible") - } - testenv.NeedsGoBuild(t) - if isRace { - t.Skipf("stdlib tests take too long in race mode and flake on builders") - } - if testing.Short() { - t.Skip("skipping RAM hungry test in -short mode") - } - - // Load, parse and type-check the program. - ctxt := build.Default // copy - ctxt.GOPATH = "" // disable GOPATH - conf := loader.Config{ - Build: &ctxt, - AllowErrors: true, - TypeChecker: types.Config{ - Error: func(err error) { t.Log(err) }, - }, - } - for _, path := range buildutil.AllPackages(conf.Build) { - conf.Import(path) - } - - // Create a package containing type and value errors to ensure - // they are properly encoded/decoded. - f, err := conf.ParseFile("haserrors/haserrors.go", `package haserrors -const UnknownValue = "" + 0 -type UnknownType undefined -`) - if err != nil { - t.Fatal(err) - } - conf.CreateFromFiles("haserrors", f) - - prog, err := conf.Load() - if err != nil { - t.Fatalf("Load failed: %v", err) - } - - numPkgs := len(prog.AllPackages) - if want := minStdlibPackages; numPkgs < want { - t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want) - } - - checked := 0 - for pkg, info := range prog.AllPackages { - if info.Files == nil { - continue // empty directory - } - // Binary export does not support generic code. - inspect := inspector.New(info.Files) - if genericfeatures.ForPackage(inspect, &info.Info) != 0 { - t.Logf("skipping package %q which uses generics", pkg.Path()) - continue - } - checked++ - exportdata, err := gcimporter.BExportData(conf.Fset, pkg) - if err != nil { - t.Fatal(err) - } - - imports := make(map[string]*types.Package) - fset2 := token.NewFileSet() - n, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg.Path()) - if err != nil { - t.Errorf("BImportData(%s): %v", pkg.Path(), err) - continue - } - if n != len(exportdata) { - t.Errorf("BImportData(%s) decoded %d bytes, want %d", - pkg.Path(), n, len(exportdata)) - } - - // Compare the packages' corresponding members. 
- for _, name := range pkg.Scope().Names() { - if !token.IsExported(name) { - continue - } - obj1 := pkg.Scope().Lookup(name) - obj2 := pkg2.Scope().Lookup(name) - if obj2 == nil { - t.Errorf("%s.%s not found, want %s", pkg.Path(), name, obj1) - continue - } - - fl1 := fileLine(conf.Fset, obj1) - fl2 := fileLine(fset2, obj2) - if fl1 != fl2 { - t.Errorf("%s.%s: got posn %s, want %s", - pkg.Path(), name, fl2, fl1) - } - - if err := equalObj(obj1, obj2); err != nil { - t.Errorf("%s.%s: %s\ngot: %s\nwant: %s", - pkg.Path(), name, err, obj2, obj1) - } - } - } - if want := minStdlibPackages; checked < want { - t.Errorf("Checked only %d packages, want at least %d", checked, want) - } -} - func fileLine(fset *token.FileSet, obj types.Object) string { posn := fset.Position(obj.Pos()) filename := filepath.Clean(strings.ReplaceAll(posn.Filename, "$GOROOT", runtime.GOROOT())) return fmt.Sprintf("%s:%d", filename, posn.Line) } -// equalObj reports how x and y differ. They are assumed to belong to -// different universes so cannot be compared directly. -func equalObj(x, y types.Object) error { - if reflect.TypeOf(x) != reflect.TypeOf(y) { - return fmt.Errorf("%T vs %T", x, y) - } - xt := x.Type() - yt := y.Type() - switch x.(type) { - case *types.Var, *types.Func: - // ok - case *types.Const: - xval := x.(*types.Const).Val() - yval := y.(*types.Const).Val() - // Use string comparison for floating-point values since rounding is permitted. - if constant.Compare(xval, token.NEQ, yval) && - !(xval.Kind() == constant.Float && xval.String() == yval.String()) { - return fmt.Errorf("unequal constants %s vs %s", xval, yval) - } - case *types.TypeName: - xt = xt.Underlying() - yt = yt.Underlying() - default: - return fmt.Errorf("unexpected %T", x) - } - return equalType(xt, yt) -} - func equalType(x, y types.Type) error { if reflect.TypeOf(x) != reflect.TypeOf(y) { return fmt.Errorf("unequal kinds: %T vs %T", x, y) @@ -448,15 +308,16 @@ func TestVeryLongFile(t *testing.T) { } // export - exportdata, err := gcimporter.BExportData(fset1, pkg) - if err != nil { + var out bytes.Buffer + if err := gcimporter.IExportData(&out, fset1, pkg); err != nil { t.Fatal(err) } + exportdata := out.Bytes() // import imports := make(map[string]*types.Package) fset2 := token.NewFileSet() - _, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg.Path()) + _, pkg2, err := gcimporter.IImportData(fset2, imports, exportdata, pkg.Path()) if err != nil { t.Fatalf("BImportData(%s): %v", pkg.Path(), err) } @@ -513,38 +374,3 @@ func checkPkg(t *testing.T, pkg *types.Package, label string) { } } } - -func TestTypeAliases(t *testing.T) { - // parse and typecheck - fset1 := token.NewFileSet() - f, err := parser.ParseFile(fset1, "p.go", src, 0) - if err != nil { - t.Fatal(err) - } - var conf types.Config - pkg1, err := conf.Check("p", fset1, []*ast.File{f}, nil) - if err == nil { - // foo in undeclared in src; we should see an error - t.Fatal("invalid source type-checked without error") - } - if pkg1 == nil { - // despite incorrect src we should see a (partially) type-checked package - t.Fatal("nil package returned") - } - checkPkg(t, pkg1, "export") - - // export - exportdata, err := gcimporter.BExportData(fset1, pkg1) - if err != nil { - t.Fatal(err) - } - - // import - imports := make(map[string]*types.Package) - fset2 := token.NewFileSet() - _, pkg2, err := gcimporter.BImportData(fset2, imports, exportdata, pkg1.Path()) - if err != nil { - t.Fatalf("BImportData(%s): %v", pkg1.Path(), err) - } - checkPkg(t, pkg2, "import") -} 
diff --git a/internal/gcimporter/bimport.go b/internal/gcimporter/bimport.go index b85de014700..d98b0db2a9a 100644 --- a/internal/gcimporter/bimport.go +++ b/internal/gcimporter/bimport.go @@ -2,340 +2,24 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. package gcimporter import ( - "encoding/binary" "fmt" - "go/constant" "go/token" "go/types" - "sort" - "strconv" - "strings" "sync" - "unicode" - "unicode/utf8" ) -type importer struct { - imports map[string]*types.Package - data []byte - importpath string - buf []byte // for reading strings - version int // export format version - - // object lists - strList []string // in order of appearance - pathList []string // in order of appearance - pkgList []*types.Package // in order of appearance - typList []types.Type // in order of appearance - interfaceList []*types.Interface // for delayed completion only - trackAllTypes bool - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - fake fakeFileSet - - // debugging support - debugFormat bool - read int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - // catch panics and return them as errors - const currentVersion = 6 - version := -1 // unknown version - defer func() { - if e := recover(); e != nil { - // Return a (possibly nil or incomplete) package unchanged (see #16088). - if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - - p := importer{ - imports: imports, - data: data, - importpath: path, - version: version, - strList: []string{""}, // empty string is mapped to 0 - pathList: []string{""}, // empty string is mapped to 0 - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - // read version info - var versionstr string - if b := p.rawByte(); b == 'c' || b == 'd' { - // Go1.7 encoding; first byte encodes low-level - // encoding format (compact vs debug). - // For backward-compatibility only (avoid problems with - // old installed packages). Newly compiled packages use - // the extensible format string. - // TODO(gri) Remove this support eventually; after Go1.8. 
- if b == 'd' { - p.debugFormat = true - } - p.trackAllTypes = p.rawByte() == 'a' - p.posInfoFormat = p.int() != 0 - versionstr = p.string() - if versionstr == "v1" { - version = 0 - } - } else { - // Go1.8 extensible encoding - // read version string and extract version number (ignore anything after the version number) - versionstr = p.rawStringln(b) - if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { - if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { - version = v - } - } - } - p.version = version - - // read version specific flags - extend as necessary - switch p.version { - // case currentVersion: - // ... - // fallthrough - case currentVersion, 5, 4, 3, 2, 1: - p.debugFormat = p.rawStringln(p.rawByte()) == "debug" - p.trackAllTypes = p.int() != 0 - p.posInfoFormat = p.int() != 0 - case 0: - // Go1.7 encoding format - nothing to do here - default: - errorf("unknown bexport format version %d (%q)", p.version, versionstr) - } - - // --- generic export data --- - - // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared()...) - - // read package data - pkg = p.pkg() - - // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) - objcount := 0 - for { - tag := p.tagOrIndex() - if tag == endTag { - break - } - p.obj(tag) - objcount++ - } - - // self-verification - if count := p.int(); count != objcount { - errorf("got %d objects; want %d", objcount, count) - } - - // ignore compiler-specific import data - - // complete interfaces - // TODO(gri) re-investigate if we still need to do this in a delayed fashion - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), p.pkgList[1:]...) - sort.Sort(byPath(list)) - pkg.SetImports(list) - - // package was imported completely and without errors - pkg.MarkComplete() - - return p.read, pkg, nil -} - func errorf(format string, args ...interface{}) { panic(fmt.Sprintf(format, args...)) } -func (p *importer) pkg() *types.Package { - // if the package was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.pkgList[i] - } - - // otherwise, i is the package tag (< 0) - if i != packageTag { - errorf("unexpected package tag %d version %d", i, p.version) - } - - // read package data - name := p.string() - var path string - if p.version >= 5 { - path = p.path() - } else { - path = p.string() - } - if p.version >= 6 { - p.int() // package height; unused by go/types - } - - // we should never see an empty package name - if name == "" { - errorf("empty package name in import") - } - - // an empty path denotes the package we are currently importing; - // it must be the first package we see - if (path == "") != (len(p.pkgList) == 0) { - errorf("package path %q for pkg index %d", path, len(p.pkgList)) - } - - // if the package was imported before, use that one; otherwise create a new one - if path == "" { - path = p.importpath - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } else if pkg.Name() != name { - errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) - } - p.pkgList = append(p.pkgList, pkg) - - return pkg -} - -// objTag returns the tag value for each object kind. 
-func objTag(obj types.Object) int { - switch obj.(type) { - case *types.Const: - return constTag - case *types.TypeName: - return typeTag - case *types.Var: - return varTag - case *types.Func: - return funcTag - default: - errorf("unexpected object: %v (%T)", obj, obj) // panics - panic("unreachable") - } -} - -func sameObj(a, b types.Object) bool { - // Because unnamed types are not canonicalized, we cannot simply compare types for - // (pointer) identity. - // Ideally we'd check equality of constant values as well, but this is good enough. - return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { - pkg := obj.Pkg() - if alt := pkg.Scope().Insert(obj); alt != nil { - // This can only trigger if we import a (non-type) object a second time. - // Excluding type aliases, this cannot happen because 1) we only import a package - // once; and b) we ignore compiler-specific export data which may contain - // functions whose inlined function bodies refer to other functions that - // were already imported. - // However, type aliases require reexporting the original type, so we need - // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, - // method importer.obj, switch case importing functions). - // TODO(gri) review/update this comment once the gc compiler handles type aliases. - if !sameObj(obj, alt) { - errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) - } - } -} - -func (p *importer) obj(tag int) { - switch tag { - case constTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - val := p.value() - p.declare(types.NewConst(pos, pkg, name, typ, val)) - - case aliasTag: - // TODO(gri) verify type alias hookup is correct - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewTypeName(pos, pkg, name, typ)) - - case typeTag: - p.typ(nil, nil) - - case varTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewVar(pos, pkg, name, typ)) - - case funcTag: - pos := p.pos() - pkg, name := p.qualifiedName() - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - p.declare(types.NewFunc(pos, pkg, name, sig)) - - default: - errorf("unexpected object tag %d", tag) - } -} - const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go -func (p *importer) pos() token.Pos { - if !p.posInfoFormat { - return token.NoPos - } - - file := p.prevFile - line := p.prevLine - delta := p.int() - line += delta - if p.version >= 5 { - if delta == deltaNewFile { - if n := p.int(); n >= 0 { - // file changed - file = p.path() - line = n - } - } - } else { - if delta == 0 { - if n := p.int(); n >= 0 { - // file changed - file = p.prevFile[:n] + p.string() - line = p.int() - } - } - } - p.prevFile = file - p.prevLine = line - - return p.fake.pos(file, line, 0) -} - // Synthesize a token.Pos type fakeFileSet struct { fset *token.FileSet @@ -389,205 +73,6 @@ var ( fakeLinesOnce sync.Once ) -func (p *importer) qualifiedName() (pkg *types.Package, name string) { - name = p.string() - pkg = p.pkg() - return -} - -func (p *importer) record(t types.Type) { - p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. 
-type dddSlice struct { - elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. -// -// A non-nil tname is used as the "owner" of the result type; i.e., -// the result type is the underlying type of tname. tname is used -// to give interface methods a named receiver type where possible. -func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { - // if the type was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.typList[i] - } - - // otherwise, i is the type tag (< 0) - switch i { - case namedTag: - // read type object - pos := p.pos() - parent, name := p.qualifiedName() - scope := parent.Scope() - obj := scope.Lookup(name) - - // if the object doesn't exist yet, create and insert it - if obj == nil { - obj = types.NewTypeName(pos, parent, name, nil) - scope.Insert(obj) - } - - if _, ok := obj.(*types.TypeName); !ok { - errorf("pkg = %s, name = %s => %s", parent, name, obj) - } - - // associate new named type with obj if it doesn't exist yet - t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - - // but record the existing type, if any - tname := obj.Type().(*types.Named) // tname is either t0 or the existing type - p.record(tname) - - // read underlying type - t0.SetUnderlying(p.typ(parent, t0)) - - // interfaces don't have associated methods - if types.IsInterface(t0) { - return tname - } - - // read associated methods - for i := p.int(); i > 0; i-- { - // TODO(gri) replace this with something closer to fieldName - pos := p.pos() - name := p.string() - if !exported(name) { - p.pkg() - } - - recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? - params, isddd := p.paramList() - result, _ := p.paramList() - p.int() // go:nointerface pragma - discarded - - sig := types.NewSignature(recv.At(0), params, result, isddd) - t0.AddMethod(types.NewFunc(pos, parent, name, sig)) - } - - return tname - - case arrayTag: - t := new(types.Array) - if p.trackAllTypes { - p.record(t) - } - - n := p.int64() - *t = *types.NewArray(p.typ(parent, nil), n) - return t - - case sliceTag: - t := new(types.Slice) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewSlice(p.typ(parent, nil)) - return t - - case dddTag: - t := new(dddSlice) - if p.trackAllTypes { - p.record(t) - } - - t.elem = p.typ(parent, nil) - return t - - case structTag: - t := new(types.Struct) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewStruct(p.fieldList(parent)) - return t - - case pointerTag: - t := new(types.Pointer) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewPointer(p.typ(parent, nil)) - return t - - case signatureTag: - t := new(types.Signature) - if p.trackAllTypes { - p.record(t) - } - - params, isddd := p.paramList() - result, _ := p.paramList() - *t = *types.NewSignature(nil, params, result, isddd) - return t - - case interfaceTag: - // Create a dummy entry in the type list. This is safe because we - // cannot expect the interface type to appear in a cycle, as any - // such cycle must contain a named type which would have been - // first defined earlier. - // TODO(gri) Is this still true now that we have type aliases? - // See issue #23225. 
- n := len(p.typList) - if p.trackAllTypes { - p.record(nil) - } - - var embeddeds []types.Type - for n := p.int(); n > 0; n-- { - p.pos() - embeddeds = append(embeddeds, p.typ(parent, nil)) - } - - t := newInterface(p.methodList(parent, tname), embeddeds) - p.interfaceList = append(p.interfaceList, t) - if p.trackAllTypes { - p.typList[n] = t - } - return t - - case mapTag: - t := new(types.Map) - if p.trackAllTypes { - p.record(t) - } - - key := p.typ(parent, nil) - val := p.typ(parent, nil) - *t = *types.NewMap(key, val) - return t - - case chanTag: - t := new(types.Chan) - if p.trackAllTypes { - p.record(t) - } - - dir := chanDir(p.int()) - val := p.typ(parent, nil) - *t = *types.NewChan(dir, val) - return t - - default: - errorf("unexpected type tag %d", i) // panics - panic("unreachable") - } -} - func chanDir(d int) types.ChanDir { // tag values must match the constants in cmd/compile/internal/gc/go.go switch d { @@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir { } } -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { - if n := p.int(); n > 0 { - fields = make([]*types.Var, n) - tags = make([]string, n) - for i := range fields { - fields[i], tags[i] = p.field(parent) - } - } - return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { - pos := p.pos() - pkg, name, alias := p.fieldName(parent) - typ := p.typ(parent, nil) - tag := p.string() - - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - errorf("named base type expected") - } - anonymous = true - } else if alias { - // anonymous field: we have an explicit name because it's an alias - anonymous = true - } - - return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { - if n := p.int(); n > 0 { - methods = make([]*types.Func, n) - for i := range methods { - methods[i] = p.method(parent, baseType) - } - } - return -} - -func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { - pos := p.pos() - pkg, name, _ := p.fieldName(parent) - // If we don't have a baseType, use a nil receiver. - // A receiver using the actual interface type (which - // we don't know yet) will be filled in when we call - // types.Interface.Complete. 
- var recv *types.Var - if baseType != nil { - recv = types.NewVar(token.NoPos, parent, "", baseType) - } - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(recv, params, result, isddd) - return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { - name = p.string() - pkg = parent - if pkg == nil { - // use the imported package instead - pkg = p.pkgList[0] - } - if p.version == 0 && name == "_" { - // version 0 didn't export a package for _ fields - return - } - switch name { - case "": - // 1) field name matches base type name and is exported: nothing to do - case "?": - // 2) field name matches base type name and is not exported: need package - name = "" - pkg = p.pkg() - case "@": - // 3) field name doesn't match type name (alias) - name = p.string() - alias = true - fallthrough - default: - if !exported(name) { - pkg = p.pkg() - } - } - return -} - -func (p *importer) paramList() (*types.Tuple, bool) { - n := p.int() - if n == 0 { - return nil, false - } - // negative length indicates unnamed parameters - named := true - if n < 0 { - n = -n - named = false - } - // n > 0 - params := make([]*types.Var, n) - isddd := false - for i := range params { - params[i], isddd = p.param(named) - } - return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { - t := p.typ(nil, nil) - td, isddd := t.(*dddSlice) - if isddd { - t = types.NewSlice(td.elem) - } - - var pkg *types.Package - var name string - if named { - name = p.string() - if name == "" { - errorf("expected named parameter") - } - if name != "_" { - pkg = p.pkg() - } - if i := strings.Index(name, "·"); i > 0 { - name = name[:i] // cut off gc-specific parameter numbering - } - } - - // read and discard compiler-specific info - p.string() - - return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { - switch tag := p.tagOrIndex(); tag { - case falseTag: - return constant.MakeBool(false) - case trueTag: - return constant.MakeBool(true) - case int64Tag: - return constant.MakeInt64(p.int64()) - case floatTag: - return p.float() - case complexTag: - re := p.float() - im := p.float() - return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - case stringTag: - return constant.MakeString(p.string()) - case unknownTag: - return constant.MakeUnknown() - default: - errorf("unexpected value tag %d", tag) // panics - panic("unreachable") - } -} - -func (p *importer) float() constant.Value { - sign := p.int() - if sign == 0 { - return constant.MakeInt64(0) - } - - exp := p.int() - mant := []byte(p.string()) // big endian - - // remove leading 0's if any - for len(mant) > 0 && mant[0] == 0 { - mant = mant[1:] - } - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { - mant[i], mant[j] = mant[j], mant[i] - } - - // adjust exponent (constant.MakeFromBytes creates an integer value, - // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) - exp -= len(mant) << 3 - if len(mant) > 0 { - for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { - exp++ - } - } - - x := constant.MakeFromBytes(mant) - switch { - case exp < 0: - d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - } - - if sign < 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { - if p.debugFormat { - p.marker('t') - } - - return int(p.rawInt64()) -} - -func (p *importer) int() int { - x := p.int64() - if int64(int(x)) != x { - errorf("exported integer too large") - } - return int(x) -} - -func (p *importer) int64() int64 { - if p.debugFormat { - p.marker('i') - } - - return p.rawInt64() -} - -func (p *importer) path() string { - if p.debugFormat { - p.marker('p') - } - // if the path was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.pathList[i] - } - // otherwise, i is the negative path length (< 0) - a := make([]string, -i) - for n := range a { - a[n] = p.string() - } - s := strings.Join(a, "/") - p.pathList = append(p.pathList, s) - return s -} - -func (p *importer) string() string { - if p.debugFormat { - p.marker('s') - } - // if the string was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.strList[i] - } - // otherwise, i is the negative string length (< 0) - if n := int(-i); n <= cap(p.buf) { - p.buf = p.buf[:n] - } else { - p.buf = make([]byte, n) - } - for i := range p.buf { - p.buf[i] = p.rawByte() - } - s := string(p.buf) - p.strList = append(p.strList, s) - return s -} - -func (p *importer) marker(want byte) { - if got := p.rawByte(); got != want { - errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) - } - - pos := p.read - if n := int(p.rawInt64()); n != pos { - errorf("incorrect position: got %d; want %d", n, pos) - } -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { - i, err := binary.ReadVarint(p) - if err != nil { - errorf("read error: %v", err) - } - return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { - p.buf = p.buf[:0] - for b != '\n' { - p.buf = append(p.buf, b) - b = p.rawByte() - } - return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { - return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { - b := p.data[0] - r := 1 - if b == '|' { - b = p.data[1] - r = 2 - switch b { - case 'S': - b = '$' - case '|': - // nothing to do - default: - errorf("unexpected escape sequence in export data") - } - } - p.data = p.data[r:] - p.read += r - return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - var predeclOnce sync.Once var predecl []types.Type // initialized lazily diff --git a/internal/gcimporter/gcimporter.go b/internal/gcimporter/gcimporter.go index a973dece936..5a36d0b0955 100644 --- a/internal/gcimporter/gcimporter.go +++ b/internal/gcimporter/gcimporter.go @@ -235,15 +235,14 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // (from "version"). Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := BImportData(fset, packages, data, id) + case 'i': // indexed, till go1.19 + _, pkg, err := IImportData(fset, packages, data[1:], id) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := UImportData(fset, packages, data[1:size], id) return pkg, err diff --git a/internal/gcimporter/iexport.go b/internal/gcimporter/iexport.go index a0dc0b5e27d..9930d8c36a7 100644 --- a/internal/gcimporter/iexport.go +++ b/internal/gcimporter/iexport.go @@ -969,6 +969,16 @@ func constantToFloat(x constant.Value) *big.Float { return &f } +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + // mpint exports a multi-precision integer. // // For unsigned types, small values are written out as a single @@ -1178,3 +1188,12 @@ func (q *objQueue) popHead() types.Object { q.head++ return obj } + +// internalError represents an error generated inside this package. 
+type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} diff --git a/internal/gcimporter/testdata/versions/test_go1.11_0i.a b/internal/gcimporter/testdata/versions/test_go1.11_0i.a deleted file mode 100644 index b00fefed0462172f5f5370f9769ed19342fc0c16..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2420 zcmd5--)q}e6h69muBw_KLdZ-RK{Y0XwvlDomSrzBA-GAqvNg0Rgub}4e66UFB}bN1 zGRnHwvDZCpu!ju#KkT)%u;;z(|JYw}yK^PWah)GyZ@WnM+1DqefAUVHk@lW)sAU!R=Z-ziAz*d$3R7Pll9 zcIA!Y&f_PKAMce$p6nHiH}FUCis}R)bTZs2 zZF!b|xb0G}ckQ0=MuV0B_x9eXpn!016NLK`U_uExPS8<$Mh9x4pq?oLj0}OT#_rqmo&#X{*fU1qhfxSD`(3C5+jDF$ zuq|)&{&(G~RkWJbrCP0AH}tATjdIsCbd8o9mRYWS5?ra+J!)g4U219-z0oiCdtFNF z{hC>?+FfAy*!r%Am2Oz$C~&AxLo1@a7O=a?4(QRj`j3Du#?EdKc%S6!>snnm>iurN zVOUzZZc?*eGi`%bsAf~$1lDo_6}O@$7Vyes0pbClCe#!zDr7zrrnKiC z)Ep?mkht`l%9?7!(MVim;V9&%Z~u} z&`C3<^pF0^eSS?8e@W;6X4CzqHjQ6P2bWkLA7fG=w}b~Xfskcb1`VgV488`UoRM-$ zM#>1;tSDrq9NyWuK~`LlLFOp3h`=S1gWMT#SHf54q z+T#odoZ&U^e&Pgo*>!b7pd|!ECs+{S7h$A2oh&h85CxPB_vkWqRYuG#M&X~SNmgMM zZB`t@P{fp{!UpC9b61q0`2t5RMmF&rp{ityyoT3k+e8ErZ43Su@LJ@R8XUD5qxKP+ q<vTbv3ss< z^@i{M(5+HID_KpdmGWgnFKX1tH%&v=Xue{Z`O*i*g@WCpHa6O%rdH4^?R>k{q_o^F zndPG01cq;^-R)tdo7O0B9Xg=C70}ieu#M;h^z>T%r@$6NWz%(gANXtQT3I*B?Pj}T zSX#bpQnOq#ZG#r5W>eh+w&l1g9z~7FZ%WTLT670(_YRYd*kn3^%dz4ze>=G2TLXvQ zL099b;V|e2cNkUCXcGAKtZc@vZ%bPyHi)vu6LB@L>x^L{u7-8p#GItDJsQ~4Z>ku_ za+7j=R;%Ys!?ONp5On)gb?jz07^+$x*mguUz^cQqy1E`-@yTbc0#jlbQd+)my#}@t zqNcv`<=Ada4{R+Wj|z%s1>uzemYwq1cmhlg_waVW#XGT@%E*cRpK2@e{*Sel%zLaX z*b}o@WcUR8pSUf&CrQAbNCJctzKp1E`2Jw33BU0TVcx?bRLyrTXZ&so(JmxrgiG3M zFKUVtu9KeLMmnaSX1KD(2;}M4A&&L2{JofNgD4tTV7E*^Yvf)G@qdtW{BF*1b6EGe zi0giCQulLl-Ot5!AJ5Q;J;wCU|4w~!PZWQR*Z(Es^Tsl^UyCQVNNyiuVj!3Jy8&PV zA!$g1hRaj}KLb%tNGT;DC4^*B6p~U3pJdn|DbC9va}-%b;1WqeY648li6jCu9GJ!j zftgF-!z)1P)5x>9Ox_6aMo1N90;DFBG&vTlu-Fq#P<{|_%>ELN?EXrGm|aCF0h89o z4C~}vfO8?GA;Z$POj1jA&d}iuXMFgX6Kt^i>X<-_2#8KFAi_IgqO4 E0@X}F`Tzg` diff --git a/internal/gcimporter/testdata/versions/test_go1.11_999b.a b/internal/gcimporter/testdata/versions/test_go1.11_999b.a deleted file mode 100644 index c35d22dce691e67127e04ea74ddd8c97a57e22ff..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2600 zcmd5-QE%c#5MFNy=W|eEYCj zofNe{t4~#>{-OSb&aBx$f#kU>+p|0K%{Q~N-rZLxPV5d&m2OwR`tYv({kfJlV9tK4GN1>%$9hg#UXWp1K*PW)>y)|`=NRW*-&W<%|Dk5#Om*JxNw>r9#U!*r~b zP0ci$Mx$A+X{J%OwbuqT?WWm}oe_(|V4Sd6=}m*7rv;u?yl%MV_s08v?Wf6Q6m@>S zx_JAs)U_|7*t!_KtKHs)x1H;^ei#;&H_#MC+23y|pIJN!!m-w9G!%tU=~==i2_YNG z@A|fKfBo+A$IHvk#w2#X^22Q&a_kIW_--(Y!Wa_RM!e@F&i0IHqM%0i!xycgGydf~ zU_+05O{vr+l#orjNhAW1m98Il^>AK5QK?63X_$73{$JMel|qOgmK#G)BLDpLr;&V# zn63|;$v`g_^@UP^kwyqP;Ej138~ZV%zGZv9{g znV&e*B=p&s#ZJP!Q?wnYb7FUEW^{*u9S-P);czK(-!$r`Rqyxu4a+ggb(`7sn(bPw z!VH(0Ho{N+P=|@>X^{1WI|EEG?uSo;jsdh=}V&d^_VEymMv z5{#JcyS*Tp=tdb=^(-YE!-<85gnq;ihxvBU5u^!MAa>%|`GU%Q?zyr%_5^ko2BC*( zkn~83oP=*VG!&Pjci*5^9g-`C$oe6cpk{Da~(`lDJVy;zog4 zbVW=-{{e#MzIi6gvuXw^X8{AKl@uyEI2~bJx6@$%1PEUGwmQYvIB5<$qk2Z$*50 zk75>_FIQD5p>37s>5beZJ44wf8d^aX7v%GO;k|hPJq4YWL!pz878ng++&xoiZnuMs z17y5kpgjiH=els+Gz1+rDI`qsK{i@_81CZGq8iaMXf;*%R8ePV0gI{7COseryqA?D z63LJRsHGFggVXp7#Hd+yYxGLd9RSj+|6dE7!@>Xn diff --git a/internal/gcimporter/testdata/versions/test_go1.11_999i.a b/internal/gcimporter/testdata/versions/test_go1.11_999i.a deleted file mode 100644 index 99401d7c37ca48fbca32f0448b71606b7f853ce4..0000000000000000000000000000000000000000 GIT 
binary patch literal 0 HcmV?d00001 literal 2420 zcmd5-&uimG6n+|9N2{11LU7Geg4UQ2(nOZ!Uy7R;W4zgI15L=fG&ip-j}>(l%aP@5 z1EJe%>9vOzdPrgahhCRm=((5v4ZZd+nDmWgC0^&p(pv{<-psuBeecaDjr3^C^sS@1 z)aj`EyH8q=ca*4U6U$9Bj`~We6ngYZWEJJPWP6I&{f1hB(lh;+&OkAbd+6@bQ|c<8 zmWzf_EK|}RnwHiTHiO1>Ckfq0rjQs*l~M` z(^Im$U%veM#jyKq|CxPI9T!}uqv`px)3g4;aP;{oXzzd9(!9g7QZ*}WV3Q;%mupJS z-*fx&R_@8(!QNgwKk}`-?d3!2f$8RL%Q+r;J|@ZsbQJW=z|5agf8=dW>OKs7V9D=%9a!78T+cGy z(c9m3vs~25W|wN^LbaloG+HTijf$?(!lr2y%I^gii&l?X*l3p;T2bHZ7y7*}rPY4f zsFtiQFnnr#$Hhvw%yHn^bU=MGpuIY<^=Jq5^7VDCs#mK0Zhy04 zYK5vnjcVDjDzr#7i|Pijy6vgB6*VHiO`ZluID@`-Psn;4GArP6?6}gei7UQ2u<1Q? zEsh$Eg5%(xpc)2U7k*QftvK~vvSs3cNHs3RbYV9cLm{Tix*1|z%h(?etl1}3ifg&e zIoYe}+Ox2%KOO|mF;#7=>jWcJD*$UmR0I6p@T0G;@pC@)TC2d67>1Ol@0(}9av^Hw zEnkk~CiK8oBl742Gc<$n$NpzUBRcnI`(tQxc$`tI5;b)!vS0ntc=WXBNKY;)8AHY3y z;*2rW<9?>Dw_{AxV7!}9nL69c&=JeUcDq#+F&PE!ed4MaI1rIdt} z5RyqzNJ=TZlVO9TxFCbfQDhN;OC$xUYhYSVBoUb5z%1wjXNQEe%B)CVHu`4lRBryvAOf|9sD`=DA z5QZYAyb!i9Cm8#p1hwZlsx#8SbA+mrCOL<5v@Ifnh_(*@2zV{>N-G@I2vLU!O>$^+ nD3dor{X)Qn<{JT5T7U9>hmi$T8}%kKFB9#&rSuczE%^q0%WFuW&wc6l=qK3gNV1LV2CkISWwo<2znR(DnOWWbk`=+lSITs1 zJbduY`PcXKW|%8lk!IhlE~PbMkKL8VV{4ha}V4EOeHssAj^ZGh&GQ1o9aT0V zE7|B7>L+=&s3k_6(@*cFEPHY<;b{n;kv(Zj0QL#&g9Lc0GL7@8k*`v;$zj8PET0xV z^BVO3%BON$0l*rK(@+9ome1$hFfQ2Q!sz#nt;>lH@;??fh+eI;#5~Q*0)&k6i-ycW z9M$0Qj_tQ`kmdnP7w_MizUT7DnM6*+L(e&jEb6nfh=$|9wIhc9x0JC3LLT7YKKD$= zoW5pom1AG}q=&cQ38gfS>1SbHk^94bS&&D_~y4t#C474yO~F zx{e=?EsI(n1K~A}t9(YwEZB8V*XMo=G#Zel@vd3FSY~B1<3=3#Nx3jg3&fp9JHtm- zb*gb(_3Tyw4G{X;3|+FKV9!AeYO9@U(KxTqPLu43v0KW+wUgD^AcoD!&10FfvhExZ z{hffO7=c>Luya7T4gB{`d;c~9VK?Y*K`QL>muo}l$gTT7pAXdgKb;SHAILu7Z0*CZ z$Vnea8*sMT_y(I%(LMYsy53j(RYQ_M-OZ!CGFsC&v>SmeyI1*Y@NSULW-JR&F}M7b z74dS$vvPr9P1g?BjvoBp`TV*pzgqKG(e2A_>khY>f3O#XkS=t=MBR2vZ?$E$)mEU@ zX}2+{H&g(k335O>(Ans8dupq*(Ye{^Xev=b1)<#_J?J65xuUl!IjJTQ+Tn&d5SmVO z&|4L9OU30-X#F;7q3wVi!4c99%waB@DtRp(-9eT9Ls@~rW84jeb__De1W?iu+DW^* ztk4W%1O8PslOTw62!BX8HS_^`<>I7v@u;$@{1|hUypx8nB@`I%B$SxHRjVyDx``D7 NcQ?$zihvOW!*Feif1)VE2;9JR0_2dO%^`=P@{(NAY$Q^q zq@)3YqSvA?&;mKQK%XFo=67nQwk}c6N1r4`=@I z4QVjY?%la{@Y$hS41G>HDZY!Vw@d!y__I<77%Q$(zN)K#c>n z721~V^(@nOdp7a9*e2NS6U%b@y`XRQecUyDrXek9hYjgpSxW8pP3aNICUKgmh9m(J zE>Fp13M=mcJbwDct-GJ!y?ehi$^1@~cJd4pJnTe%JQ}AN%eTYP0iNQ{+{+}FXi;i^ zaC3;0hlep42Jj)eBn$untfEyAfQ6M{5TyexofmUa?SenA@`l@+AI|@+^3rt)AWDti zFc5(1(gGoT`q9~Wc{bR$=8Rc!A^{xJ3N zaPs=4?zs*Lt$t{QByg>6XxJX^hIY{N9W%tt{}R};{DAmu5RbUJW%ffO3_L=5VV5l6*ZhCesyzp>E=A;JYzhJs%EWh zmQ3Rj(IVfArxQ&#*ggwa33~Y@Drfz+^W(fHroT2598#a z_Et?wC-c}qH;c%{Wgg>c-mpMhm4pT?138zmh6U;r>bKA1^~(^bS!k>6z;$OoihIxy zYqx(mXv??1KWMMNVb1}ZwN?0)al#u^2R3VU_C^$g(@X5fDY__b0bN>!l`#m;?xy<~7Z;B9V zLKAd`t=H6AU6gBe32Kdcoi)N0nVpA1(Hd$%V}WU|%eBS=bG5)IGLk_Cs$4ycBR~f-YN8|AfotChY*l~BSE^i{gInmi0M7+UXW+Gc88z0vVt#QqU%$7~6Y}XlzC8MDY*kH5)V;GCf`>CKa_pp6Wlt4RT zUnW&-5J5!1Bn_%KbzU{9xX1+dk}JE2DH%244}ndEXSl^(IJt`l+=}~g$1-{)n5P00 dxUU2z+5L^LdsMlx$dx6&zhKtR&Wb;+KLH~_A*28R diff --git a/internal/gcimporter/testdata/versions/test_go1.8_4.a b/internal/gcimporter/testdata/versions/test_go1.8_4.a deleted file mode 100644 index 26b8531650ad8f85885fcf62f8306e5f686a1dca..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1658 zcmah}L2KMb6n-<6*FxTgsq1b{DMnqE#W=23(#pGQ6G|FP8|cBM^&y0UGb@eOYGp~* zY7+vX^ji803OTsYV^2Ax*U&)Ez4U+d7mWL)-LVtJCc`jq=6m1!<~_|rlhjPd-wVGm z1D-#M5|f_<1J4Y^LlK)dcO2Vs`;O=Cn1?fAexBtfZk^$t>+IPh)3u$UdDF&W-rEav zej>6oN(zx1pW-kKqtm%#JQVpfN)yvF44`DW5Yqyd-UWE{`imOH%WU{ zWWwX##E(w0G)J9Y$qsnIdkd9mg;#~el#(N$04sC_2;jAb@reO2 
zNuAv|ARz7HaV9M5m`{(bcH4R-RMHG&w{maQJ;xx|E6)s5%ZeOiq3bOJu&;eg||9}IQ_ZoB=#&>Ou*?)Uvb_&CH^jO@O<8#-Yy7GfCgjE0^+ z28o4*QH+`1;L-z1-V)|$7vdayp7)`O2X8d2V7P1wxZN4 z-cXKntI_>QA9B$jbiawL;MK!F3v!I5|== z3j6Bc=7-C={;N8_&5{K{S;gN}n`>;zf2Z`RKuIZWK?`jB&onfAL7izv)6|Rx4$y8e z0J;j2uF)nmFIg?7HITLbI_v&N^f`g&#PApey3c5fK42sG_$Pwv#_!cC@0;`)foH_9 zk+gQ3(dOEBCFx1IoRyrmWF5)?-KC6V-HXGTWSi6kQ!70khD%qntp>8NW4CD+x@a4E z2APiPXT(Bz*!o&GVEX~?Y{^_~Q9zPMb5$~UJ9Z7ZrKMZ~v%mQBri;N$8`F8p1bGlny&5i&D%B(^TFPz z;3p!_;8<196I3*1d$M-+mg4B!qV3?+X{?VH6}; zz?1Qxf9!a!=Lo0ognYn9KF4h@^mjvUyFI_}4PGPndO;`x9AYR2cF)}%IiqkWM1Qn1 z=zGBsB-S;G6U_7upOjf7Qc>_ygdc<4o>m}E-^l+I`eFz0rHAA0H@Xfz|BPke|1)`+iXcrZt?40oFTZL8Nb(Rod^G~7A$jbiewML-p{&gCc zI5|==3j6$@=6j2}{);-l&5{{GS;gN}n`>;vzft*Aprn+xpanMmXBry5pw2X-X=+9T z2WU4L09^%1*Ju-(m#h}k8pv9Io%Qg2`hvgr@ zU9=57hg?VXGh(4UY<-~{u>A;kwq!20C?LtBI_PL^_?O^Zt&-WS*lx{!fFX@4#LpRh hN$fL%fd7&p;{9E%1IcdUhrv&SSzxYkem?yz{|7p9b&dc4 From 1c9fe3f82c363b929ef7239ca0ad8a5dafbbcf05 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 11 May 2023 14:33:05 -0400 Subject: [PATCH 021/109] gopls: improve diagnostics for orphaned files Fix some of the very misleading errors gopls produces when it can't find packages for a file. Try to diagnose _why_ a file is orphaned by the workspace, rather than just guess that it may be due to build constraints. Only put diagnostics on files that we can prove are excluded in some way. To achieve this, we need to be able to differentiate command-line-arguments packages that are standalone packages, so add a corresponding field on source.Metadata. Refactor/simplify some functions that operate on open files. In the future, we really should track overlays separately in the snapshot, but that is out-of-scope for now. Also make a minor fix for TestImplementationsOfError: I use $HOME/src as my development directory, so GOROOT/src is $HOME/src/go/src. 
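For background: "command-line-arguments" is the synthetic package that the
go command (and therefore go/packages) creates when a lone .go file is named
on the command line, which is how gopls loads standalone files. A minimal
sketch of that behavior, assuming a hypothetical standalone file
./tools/gen.go:

    package main

    import (
        "fmt"

        "golang.org/x/tools/go/packages"
    )

    func main() {
        cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
        // Loading a bare .go file path (rather than a package pattern) yields
        // a synthetic package whose ID is "command-line-arguments".
        pkgs, err := packages.Load(cfg, "./tools/gen.go")
        if err != nil {
            panic(err)
        }
        for _, p := range pkgs {
            fmt.Println(p.ID, p.GoFiles) // typically "command-line-arguments"
        }
    }

Files whose only associated packages are of this kind, and which are not
standalone files, are the ones the new diagnostics target.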
For golang/go#53880 Change-Id: I8e9fa7d4f2c03ce3daaab7c6d119b4276ec6da79 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494675 Run-TryBot: Robert Findley Reviewed-by: Alan Donovan gopls-CI: kokoro TryBot-Result: Gopher Robot --- gopls/internal/lsp/cache/check.go | 12 +- .../{standalone_go116.go => constraints.go} | 37 +-- ...lone_go116_test.go => constraints_test.go} | 0 gopls/internal/lsp/cache/graph.go | 2 + gopls/internal/lsp/cache/load.go | 42 +++- gopls/internal/lsp/cache/session.go | 11 +- gopls/internal/lsp/cache/snapshot.go | 210 ++++++++++++++---- gopls/internal/lsp/cache/standalone_go115.go | 14 -- gopls/internal/lsp/cache/view.go | 2 +- gopls/internal/lsp/diagnostics.go | 77 +------ gopls/internal/lsp/regtest/marker.go | 42 +++- gopls/internal/lsp/source/view.go | 35 +-- .../testdata/diagnostics/excludedfile.txt | 38 ++++ .../marker/testdata/quickfix/addgowork.txt | 44 ++++ .../marker/testdata/quickfix/usemodule.txt | 49 ++++ .../internal/regtest/misc/references_test.go | 2 +- .../regtest/workspace/metadata_test.go | 2 +- 17 files changed, 431 insertions(+), 188 deletions(-) rename gopls/internal/lsp/cache/{standalone_go116.go => constraints.go} (63%) rename gopls/internal/lsp/cache/{standalone_go116_test.go => constraints_test.go} (100%) delete mode 100644 gopls/internal/lsp/cache/standalone_go115.go create mode 100644 gopls/internal/regtest/marker/testdata/diagnostics/excludedfile.txt create mode 100644 gopls/internal/regtest/marker/testdata/quickfix/addgowork.txt create mode 100644 gopls/internal/regtest/marker/testdata/quickfix/usemodule.txt diff --git a/gopls/internal/lsp/cache/check.go b/gopls/internal/lsp/cache/check.go index ce844878df0..83ea17788a6 100644 --- a/gopls/internal/lsp/cache/check.go +++ b/gopls/internal/lsp/cache/check.go @@ -1440,7 +1440,7 @@ func (b *typeCheckBatch) typesConfig(ctx context.Context, inputs typeCheckInputs depPH := b.handles[id] if depPH == nil { // e.g. missing metadata for dependencies in buildPackageHandle - return nil, missingPkgError(path, inputs.moduleMode) + return nil, missingPkgError(inputs.id, path, inputs.moduleMode) } if !source.IsValidImport(inputs.pkgPath, depPH.m.PkgPath) { return nil, fmt.Errorf("invalid use of internal package %q", path) @@ -1601,13 +1601,17 @@ func depsErrors(ctx context.Context, m *source.Metadata, meta *metadataGraph, fs // missingPkgError returns an error message for a missing package that varies // based on the user's workspace mode. -func missingPkgError(pkgPath string, moduleMode bool) error { +func missingPkgError(from PackageID, pkgPath string, moduleMode bool) error { // TODO(rfindley): improve this error. Previous versions of this error had // access to the full snapshot, and could provide more information (such as // the initialization error). if moduleMode { - // Previously, we would present the initialization error here. - return fmt.Errorf("no required module provides package %q", pkgPath) + if source.IsCommandLineArguments(from) { + return fmt.Errorf("current file is not included in a workspace module") + } else { + // Previously, we would present the initialization error here. + return fmt.Errorf("no required module provides package %q", pkgPath) + } } else { // Previously, we would list the directories in GOROOT and GOPATH here. 
return fmt.Errorf("cannot find package %q in GOROOT or GOPATH", pkgPath) diff --git a/gopls/internal/lsp/cache/standalone_go116.go b/gopls/internal/lsp/cache/constraints.go similarity index 63% rename from gopls/internal/lsp/cache/standalone_go116.go rename to gopls/internal/lsp/cache/constraints.go index 2f72d5f5495..9503abc1ebd 100644 --- a/gopls/internal/lsp/cache/standalone_go116.go +++ b/gopls/internal/lsp/cache/constraints.go @@ -2,12 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.16 -// +build go1.16 - package cache import ( + "go/ast" "go/build/constraint" "go/parser" "go/token" @@ -26,25 +24,38 @@ func isStandaloneFile(src []byte, standaloneTags []string) bool { return false } - for _, cg := range f.Comments { + found := false + walkConstraints(f, func(c constraint.Expr) bool { + if tag, ok := c.(*constraint.TagExpr); ok { + for _, t := range standaloneTags { + if t == tag.Tag { + found = true + return false + } + } + } + return true + }) + + return found +} + +// walkConstraints calls f for each constraint expression in the file, until +// all constraints are exhausted or f returns false. +func walkConstraints(file *ast.File, f func(constraint.Expr) bool) { + for _, cg := range file.Comments { // Even with PackageClauseOnly the parser consumes the semicolon following // the package clause, so we must guard against comments that come after // the package name. - if cg.Pos() > f.Name.Pos() { + if cg.Pos() > file.Name.Pos() { continue } for _, comment := range cg.List { if c, err := constraint.Parse(comment.Text); err == nil { - if tag, ok := c.(*constraint.TagExpr); ok { - for _, t := range standaloneTags { - if t == tag.Tag { - return true - } - } + if !f(c) { + return } } } } - - return false } diff --git a/gopls/internal/lsp/cache/standalone_go116_test.go b/gopls/internal/lsp/cache/constraints_test.go similarity index 100% rename from gopls/internal/lsp/cache/standalone_go116_test.go rename to gopls/internal/lsp/cache/constraints_test.go diff --git a/gopls/internal/lsp/cache/graph.go b/gopls/internal/lsp/cache/graph.go index 3d1d0dd7931..684bdab957f 100644 --- a/gopls/internal/lsp/cache/graph.go +++ b/gopls/internal/lsp/cache/graph.go @@ -24,6 +24,8 @@ type metadataGraph struct { // ids maps file URIs to package IDs, sorted by (!valid, cli, packageID). // A single file may belong to multiple packages due to tests packages. + // + // Invariant: all IDs present in the ids map exist in the metadata map. ids map[span.URI][]PackageID } diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go index d932c953cb6..6f60c3b6b07 100644 --- a/gopls/internal/lsp/cache/load.go +++ b/gopls/internal/lsp/cache/load.go @@ -36,12 +36,15 @@ var errNoPackages = errors.New("no packages returned") // // The resulting error may wrap the moduleErrorMap error type, representing // errors associated with specific modules. +// +// If scopes contains a file scope there must be exactly one scope. func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadScope) (err error) { id := atomic.AddUint64(&loadID, 1) eventName := fmt.Sprintf("go/packages.Load #%d", id) // unique name for logging var query []string var containsDir bool // for logging + var standalone bool // whether this is a load of a standalone file // Keep track of module query -> module path so that we can later correlate query // errors with errors. 
@@ -55,7 +58,14 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadSc query = append(query, string(scope)) case fileLoadScope: + // Given multiple scopes, the resulting load might contain inaccurate + // information. For example go/packages returns at most one command-line + // arguments package, and does not handle a combination of standalone + // files and packages. uri := span.URI(scope) + if len(scopes) > 1 { + panic(fmt.Sprintf("internal error: load called with multiple scopes when a file scope is present (file: %s)", uri)) + } fh := s.FindFile(uri) if fh == nil || s.View().FileKind(fh) != source.Go { // Don't try to load a file that doesn't exist, or isn't a go file. @@ -66,6 +76,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadSc continue } if isStandaloneFile(contents, s.view.Options().StandaloneTags) { + standalone = true query = append(query, uri.Filename()) } else { query = append(query, fmt.Sprintf("file=%s", uri.Filename())) @@ -144,6 +155,10 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadSc return fmt.Errorf("packages.Load error: %w", err) } + if standalone && len(pkgs) > 1 { + return bug.Errorf("internal error: go/packages returned multiple packages for standalone file") + } + // Workaround for a bug (?) that has been in go/packages since // the outset: Package("unsafe").GoFiles=[], whereas it should // include unsafe/unsafe.go. Derive it from builtins.go. @@ -156,9 +171,10 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadSc { var builtin, unsafe *packages.Package for _, pkg := range pkgs { - if pkg.ID == "unsafe" { + switch pkg.ID { + case "unsafe": unsafe = pkg - } else if pkg.ID == "builtin" { + case "builtin": builtin = pkg } } @@ -221,7 +237,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadSc if allFilesExcluded(pkg.GoFiles, filterFunc) { continue } - buildMetadata(newMetadata, pkg, cfg.Dir, query) + buildMetadata(newMetadata, pkg, cfg.Dir, standalone) } s.mu.Lock() @@ -372,7 +388,7 @@ https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.` rootMod = uri.Filename() } rootDir := filepath.Dir(rootMod) - nestedModules := make(map[string][]source.FileHandle) + nestedModules := make(map[string][]*Overlay) for _, fh := range openFiles { mod, err := findRootPattern(ctx, filepath.Dir(fh.URI().Filename()), "go.mod", s) if err != nil { @@ -401,9 +417,9 @@ See https://github.com/golang/tools/blob/master/gopls/doc/workspace.md for more // "orphaned". Don't show a general diagnostic in the progress bar, // because the user may still want to edit a file in a nested module. var srcDiags []*source.Diagnostic - for modDir, uris := range nestedModules { + for modDir, files := range nestedModules { msg := fmt.Sprintf("This file is in %s, which is a nested module in the %s module.\n%s", modDir, rootMod, multiModuleMsg) - srcDiags = append(srcDiags, s.applyCriticalErrorToFiles(ctx, msg, uris)...) + srcDiags = append(srcDiags, s.applyCriticalErrorToFiles(ctx, msg, files)...) 
} if len(srcDiags) != 0 { return fmt.Errorf("You have opened a nested module.\n%s", multiModuleMsg), srcDiags @@ -412,7 +428,7 @@ See https://github.com/golang/tools/blob/master/gopls/doc/workspace.md for more return nil, nil } -func (s *snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, files []source.FileHandle) []*source.Diagnostic { +func (s *snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, files []*Overlay) []*source.Diagnostic { var srcDiags []*source.Diagnostic for _, fh := range files { // Place the diagnostics on the package or module declarations. @@ -446,7 +462,7 @@ func (s *snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, fi // buildMetadata populates the updates map with metadata updates to // apply, based on the given pkg. It recurs through pkg.Imports to ensure that // metadata exists for all dependencies. -func buildMetadata(updates map[PackageID]*source.Metadata, pkg *packages.Package, loadDir string, query []string) { +func buildMetadata(updates map[PackageID]*source.Metadata, pkg *packages.Package, loadDir string, standalone bool) { // Allow for multiple ad-hoc packages in the workspace (see #47584). pkgPath := PackagePath(pkg.PkgPath) id := PackageID(pkg.ID) @@ -482,6 +498,7 @@ func buildMetadata(updates map[PackageID]*source.Metadata, pkg *packages.Package Module: pkg.Module, Errors: pkg.Errors, DepsErrors: packagesinternal.GetDepsErrors(pkg), + Standalone: standalone, } updates[id] = m @@ -494,6 +511,10 @@ func buildMetadata(updates map[PackageID]*source.Metadata, pkg *packages.Package uri := span.URIFromPath(filename) m.GoFiles = append(m.GoFiles, uri) } + for _, filename := range pkg.IgnoredFiles { + uri := span.URIFromPath(filename) + m.IgnoredFiles = append(m.IgnoredFiles, uri) + } depsByImpPath := make(map[ImportPath]PackageID) depsByPkgPath := make(map[PackagePath]PackageID) @@ -582,7 +603,7 @@ func buildMetadata(updates map[PackageID]*source.Metadata, pkg *packages.Package depsByImpPath[importPath] = PackageID(imported.ID) depsByPkgPath[PackagePath(imported.PkgPath)] = PackageID(imported.ID) - buildMetadata(updates, imported, loadDir, query) + buildMetadata(updates, imported, loadDir, false) // only top level packages can be standalone } m.DepsByImpPath = depsByImpPath m.DepsByPkgPath = depsByPkgPath @@ -685,7 +706,8 @@ func containsOpenFileLocked(s *snapshot, m *source.Metadata) bool { } for uri := range uris { - if s.isOpenLocked(uri) { + fh, _ := s.files.Get(uri) + if _, open := fh.(*Overlay); open { return true } } diff --git a/gopls/internal/lsp/cache/session.go b/gopls/internal/lsp/cache/session.go index d824f3c6f67..17f9bdb7bbd 100644 --- a/gopls/internal/lsp/cache/session.go +++ b/gopls/internal/lsp/cache/session.go @@ -308,7 +308,16 @@ func (s *Session) updateViewLocked(ctx context.Context, view *View, options *sou return nil, fmt.Errorf("view %q not found", view.id) } - v, _, release, err := s.createView(ctx, view.name, view.folder, options, seqID) + v, snapshot, release, err := s.createView(ctx, view.name, view.folder, options, seqID) + // The new snapshot has lost the history of the previous view. As a result, + // it may not see open files that aren't in its build configuration (as it + // would have done via didOpen notifications). This can lead to inconsistent + // behavior when configuration is changed mid-session. + // + // Ensure the new snapshot observes all open files. 
+ for _, o := range v.fs.Overlays() { + _, _ = snapshot.ReadFile(ctx, o.URI()) + } release() if err != nil { diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index 71f65630e0d..20353647461 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "go/ast" + "go/build/constraint" "go/token" "go/types" "io" @@ -901,15 +902,9 @@ func (s *snapshot) memoizeActivePackage(id PackageID, pkg *Package) (active *Pac defer func() { s.activePackages.Set(id, active, nil) // store the result either way: remember that pkg is not open }() - for _, cgf := range pkg.Metadata().GoFiles { - if s.isOpenLocked(cgf) { - return pkg - } - } - for _, cgf := range pkg.Metadata().CompiledGoFiles { - if s.isOpenLocked(cgf) { - return pkg - } + + if containsOpenFileLocked(s, pkg.Metadata()) { + return pkg } return nil } @@ -1215,7 +1210,6 @@ func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI { // // The given uri must be a file, not a directory. func nearestModFile(ctx context.Context, uri span.URI, fs source.FileSource) (span.URI, error) { - // TODO(rfindley) dir := filepath.Dir(uri.Filename()) mod, err := findRootPattern(ctx, dir, "go.mod", fs) if err != nil { @@ -1261,11 +1255,11 @@ func (s *snapshot) clearShouldLoad(scopes ...loadScope) { } } -// noValidMetadataForURILocked reports whether there is any valid metadata for -// the given URI. -func (s *snapshot) noValidMetadataForURILocked(uri span.URI) bool { +// noRealPackagesForURILocked reports whether there are any +// non-command-line-arguments packages containing the given URI. +func (s *snapshot) noRealPackagesForURILocked(uri span.URI) bool { for _, id := range s.meta.ids[uri] { - if _, ok := s.meta.metadata[id]; ok { + if !source.IsCommandLineArguments(id) || s.meta.metadata[id].Standalone { return false } } @@ -1351,28 +1345,25 @@ func (s lockedSnapshot) ReadFile(ctx context.Context, uri span.URI) (source.File func (s *snapshot) IsOpen(uri span.URI) bool { s.mu.Lock() defer s.mu.Unlock() - return s.isOpenLocked(uri) + fh, _ := s.files.Get(uri) + _, open := fh.(*Overlay) + return open } -func (s *snapshot) openFiles() []source.FileHandle { +func (s *snapshot) openFiles() []*Overlay { s.mu.Lock() defer s.mu.Unlock() - var open []source.FileHandle + var open []*Overlay s.files.Range(func(uri span.URI, fh source.FileHandle) { - if isFileOpen(fh) { - open = append(open, fh) + if o, ok := fh.(*Overlay); ok { + open = append(open, o) } }) return open } -func (s *snapshot) isOpenLocked(uri span.URI) bool { - fh, _ := s.files.Get(uri) - return isFileOpen(fh) -} - func isFileOpen(fh source.FileHandle) bool { _, open := fh.(*Overlay) return open @@ -1590,15 +1581,39 @@ func (s *snapshot) reloadWorkspace(ctx context.Context) error { // // An error is returned if the load is canceled. func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error { + s.mu.Lock() + meta := s.meta + s.mu.Unlock() // When we load ./... or a package path directly, we may not get packages // that exist only in overlays. As a workaround, we search all of the files // available in the snapshot and reload their metadata individually using a // file= query if the metadata is unavailable. 
- files := s.orphanedOpenFiles() + open := s.openFiles() + var files []*Overlay + for _, o := range open { + uri := o.URI() + if s.IsBuiltin(uri) || s.view.FileKind(o) != source.Go { + continue + } + if len(meta.ids[uri]) == 0 { + files = append(files, o) + } + } if len(files) == 0 { return nil } + // Filter to files that are not known to be unloadable. + s.mu.Lock() + loadable := files[:0] + for _, file := range files { + if _, unloadable := s.unloadableFiles[file.URI()]; !unloadable { + loadable = append(loadable, file) + } + } + files = loadable + s.mu.Unlock() + var uris []span.URI for _, file := range files { uris = append(uris, file.URI()) @@ -1654,7 +1669,7 @@ func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error { // TODO(rfindley): instead of locking here, we should have load return the // metadata graph that resulted from loading. uri := file.URI() - if s.noValidMetadataForURILocked(uri) { + if len(s.meta.ids) == 0 { s.unloadableFiles[uri] = struct{}{} } } @@ -1662,34 +1677,133 @@ func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error { return nil } -func (s *snapshot) orphanedOpenFiles() []source.FileHandle { +// OrphanedFileDiagnostics reports diagnostics describing why open files have +// no packages or have only command-line-arguments packages. +// +// If the resulting diagnostic is nil, the file is either not orphaned or we +// can't produce a good diagnostic. +// +// TODO(rfindley): reconcile the definition of "orphaned" here with +// reloadOrphanedFiles. The latter does not include files with +// command-line-arguments packages. +func (s *snapshot) OrphanedFileDiagnostics(ctx context.Context) map[span.URI]*source.Diagnostic { s.mu.Lock() - defer s.mu.Unlock() + meta := s.meta + s.mu.Unlock() - var files []source.FileHandle - s.files.Range(func(uri span.URI, fh source.FileHandle) { - // Only consider open files, which will be represented as overlays. - if _, isOverlay := fh.(*Overlay); !isOverlay { - return + var files []*Overlay + +searchOverlays: + for _, o := range s.overlays() { + uri := o.URI() + if s.IsBuiltin(uri) || s.view.FileKind(o) != source.Go { + continue } - // Don't try to reload metadata for go.mod files. - if s.view.FileKind(fh) != source.Go { - return + for _, id := range meta.ids[o.URI()] { + if !source.IsCommandLineArguments(id) || meta.metadata[id].Standalone { + continue searchOverlays + } } - // If the URI doesn't belong to this view, then it's not in a workspace - // package and should not be reloaded directly. - if !source.InDir(s.view.folder.Filename(), uri.Filename()) { - return + files = append(files, o) + } + if len(files) == 0 { + return nil + } + + loadedModFiles := make(map[span.URI]struct{}) + ignoredFiles := make(map[span.URI]bool) + for _, meta := range meta.metadata { + if meta.Module != nil && meta.Module.GoMod != "" { + gomod := span.URIFromPath(meta.Module.GoMod) + loadedModFiles[gomod] = struct{}{} } - // Don't reload metadata for files we've already deemed unloadable. - if _, ok := s.unloadableFiles[uri]; ok { - return + for _, ignored := range meta.IgnoredFiles { + ignoredFiles[ignored] = true + } + } + + diagnostics := make(map[span.URI]*source.Diagnostic) + for _, fh := range files { + // Only warn about orphaned files if the file is well-formed enough to + // actually be part of a package. 
+ // + // Use ParseGo as for open files this is likely to be a cache hit (we'll have ) + pgf, err := s.ParseGo(ctx, fh, source.ParseHeader) + if err != nil { + continue } - if s.noValidMetadataForURILocked(uri) { - files = append(files, fh) + if !pgf.File.Name.Pos().IsValid() { + continue } - }) - return files + rng, err := pgf.PosRange(pgf.File.Name.Pos(), pgf.File.Name.End()) + if err != nil { + continue + } + + // If we have a relevant go.mod file, check whether the file is orphaned + // due to its go.mod file being inactive. We could also offer a + // prescriptive diagnostic in the case that there is no go.mod file, but it + // is harder to be precise in that case, and less important. + var msg string + if goMod, err := nearestModFile(ctx, fh.URI(), s); err == nil && goMod != "" { + if _, ok := loadedModFiles[goMod]; !ok { + modDir := filepath.Dir(goMod.Filename()) + if rel, err := filepath.Rel(s.view.folder.Filename(), modDir); err == nil { + modDir = rel + } + + var fix string + if s.view.goversion >= 18 { + if s.view.gowork != "" { + fix = fmt.Sprintf("To fix this problem, you can add this module to your go.work file (%s)", s.view.gowork) + } else { + fix = "To fix this problem, you can add a go.work file that uses this directory." + } + } else { + fix = `To work with multiple modules simultaneously, please upgrade to Go 1.18 or +later, reinstall gopls, and use a go.work file.` + } + msg = fmt.Sprintf(`This file is in directory %q, which is not included in your workspace. +%s +See the documentation for more information on setting up your workspace: +https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`, modDir, fix) + } + } + + if msg == "" && ignoredFiles[fh.URI()] { + // TODO(rfindley): use the constraint package to check if the file + // _actually_ satisfies the current build context. + hasConstraint := false + walkConstraints(pgf.File, func(constraint.Expr) bool { + hasConstraint = true + return false + }) + var fix string + if hasConstraint { + fix = `This file may be excluded due to its build tags; try adding "-tags=" to your gopls "buildFlags" configuration +See the documentation for more information on working with build tags: +https://github.com/golang/tools/blob/master/gopls/doc/settings.md#buildflags-string.` + } else if strings.Contains(filepath.Base(fh.URI().Filename()), "_") { + fix = `This file may be excluded due to its GOOS/GOARCH, or other build constraints.` + } else { + fix = `This file is ignored by your gopls build.` // we don't know why + } + msg = fmt.Sprintf("No packages found for open file %s.\n%s", fh.URI().Filename(), fix) + } + + if msg != "" { + // Only report diagnostics if we detect an actual exclusion. 
+ diagnostics[fh.URI()] = &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityWarning, + Source: source.ListError, + Message: msg, + } + } + } + + return diagnostics } // TODO(golang/go#53756): this function needs to consider more than just the @@ -2347,7 +2461,7 @@ func (s *snapshot) BuiltinFile(ctx context.Context) (*source.ParsedGoFile, error return pgfs[0], nil } -func (s *snapshot) IsBuiltin(ctx context.Context, uri span.URI) bool { +func (s *snapshot) IsBuiltin(uri span.URI) bool { s.mu.Lock() defer s.mu.Unlock() // We should always get the builtin URI in a canonical form, so use simple diff --git a/gopls/internal/lsp/cache/standalone_go115.go b/gopls/internal/lsp/cache/standalone_go115.go deleted file mode 100644 index 79569ae10ec..00000000000 --- a/gopls/internal/lsp/cache/standalone_go115.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.16 -// +build !go1.16 - -package cache - -// isStandaloneFile returns false, as the 'standaloneTags' setting is -// unsupported on Go 1.15 and earlier. -func isStandaloneFile(src []byte, standaloneTags []string) bool { - return false -} diff --git a/gopls/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go index ae988368aa0..884b0fcda2c 100644 --- a/gopls/internal/lsp/cache/view.go +++ b/gopls/internal/lsp/cache/view.go @@ -68,7 +68,7 @@ type View struct { vulns map[span.URI]*govulncheck.Result // fs is the file source used to populate this view. - fs source.FileSource + fs *overlayFS // seenFiles tracks files that the view has accessed. // TODO(golang/go#57558): this notion is fundamentally problematic, and diff --git a/gopls/internal/lsp/diagnostics.go b/gopls/internal/lsp/diagnostics.go index 520549d1aaf..26f22422607 100644 --- a/gopls/internal/lsp/diagnostics.go +++ b/gopls/internal/lsp/diagnostics.go @@ -214,7 +214,7 @@ func (s *Server) diagnoseChangedFiles(ctx context.Context, snapshot source.Snaps } // Don't request type-checking for builtin.go: it's not a real package. - if snapshot.IsBuiltin(ctx, uri) { + if snapshot.IsBuiltin(uri) { continue } @@ -391,15 +391,8 @@ func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, analyze // Orphaned files. // Confirm that every opened file belongs to a package (if any exist in // the workspace). Otherwise, add a diagnostic to the file. - for _, o := range s.session.Overlays() { - if _, ok := seen[o.URI()]; ok { - continue - } - diagnostic := s.checkForOrphanedFile(ctx, snapshot, o) - if diagnostic == nil { - continue - } - s.storeDiagnostics(snapshot, o.URI(), orphanedSource, []*source.Diagnostic{diagnostic}, true) + for uri, diag := range snapshot.OrphanedFileDiagnostics(ctx) { + s.storeDiagnostics(snapshot, uri, orphanedSource, []*source.Diagnostic{diag}, true) } } @@ -475,7 +468,7 @@ func (s *Server) diagnosePkgs(ctx context.Context, snapshot source.Snapshot, toD // Merge analysis diagnostics with package diagnostics, and store the // resulting analysis diagnostics. for uri, adiags := range analysisDiags { - if snapshot.IsBuiltin(ctx, uri) { + if snapshot.IsBuiltin(uri) { bug.Reportf("go/analysis reported diagnostics for the builtin file: %v", adiags) continue } @@ -506,7 +499,7 @@ func (s *Server) diagnosePkgs(ctx context.Context, snapshot source.Snapshot, toD } // builtin.go exists only for documentation purposes, and is not valid Go code. 
// Don't report distracting errors - if snapshot.IsBuiltin(ctx, uri) { + if snapshot.IsBuiltin(uri) { bug.Reportf("type checking reported diagnostics for the builtin file: %v", diags) continue } @@ -668,66 +661,6 @@ func (s *Server) showCriticalErrorStatus(ctx context.Context, snapshot source.Sn } } -// checkForOrphanedFile checks that the given URIs can be mapped to packages. -// If they cannot and the workspace is not otherwise unloaded, it also surfaces -// a warning, suggesting that the user check the file for build tags. -func (s *Server) checkForOrphanedFile(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) *source.Diagnostic { - // TODO(rfindley): this function may fail to produce a diagnostic for a - // variety of reasons, some of which should probably not be ignored. For - // example, should this function be tolerant of the case where fh does not - // exist, or does not have a package name? - // - // It would be better to panic or report a bug in several of the cases below, - // so that we can move toward guaranteeing we show the user a meaningful - // error whenever it makes sense. - if snapshot.View().FileKind(fh) != source.Go { - return nil - } - // builtin files won't have a package, but they are never orphaned. - if snapshot.IsBuiltin(ctx, fh.URI()) { - return nil - } - - // This call has the effect of inserting fh into snapshot.files, - // where for better or worse (actually: just worse) it influences - // the sets of open, known, and orphaned files. - snapshot.ReadFile(ctx, fh.URI()) - - metas, _ := snapshot.MetadataForFile(ctx, fh.URI()) - if len(metas) > 0 || ctx.Err() != nil { - return nil // file has a package (or cancelled) - } - // Inv: file does not belong to a package we know about. - pgf, err := snapshot.ParseGo(ctx, fh, source.ParseHeader) - if err != nil { - return nil - } - if !pgf.File.Name.Pos().IsValid() { - return nil - } - rng, err := pgf.NodeRange(pgf.File.Name) - if err != nil { - return nil - } - // If the file no longer has a name ending in .go, this diagnostic is wrong - if filepath.Ext(fh.URI().Filename()) != ".go" { - return nil - } - // TODO(rstambler): We should be able to parse the build tags in the - // file and show a more specific error message. For now, put the diagnostic - // on the package declaration. - return &source.Diagnostic{ - URI: fh.URI(), - Range: rng, - Severity: protocol.SeverityWarning, - Source: source.ListError, - Message: fmt.Sprintf(`No packages found for open file %s: %v. -If this file contains build tags, try adding "-tags=" to your gopls "buildFlags" configuration (see (https://github.com/golang/tools/blob/master/gopls/doc/settings.md#buildflags-string). -Otherwise, see the troubleshooting guidelines for help investigating (https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md). -`, fh.URI().Filename(), err), - } -} - // publishDiagnostics collects and publishes any unpublished diagnostic reports. func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot source.Snapshot) { ctx, done := event.Start(ctx, "Server.publishDiagnostics", source.SnapshotLabels(snapshot)...) 
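The build-tag half of these diagnostics rests on the standard
go/build/constraint package: walkConstraints (cache/constraints.go above)
enumerates the constraint expressions in a file's header, and the
orphaned-file diagnostic only mentions build tags when at least one such
expression is present. A self-contained sketch of that underlying mechanism,
using only the standard library:

    package main

    import (
        "fmt"
        "go/build/constraint"
        "go/parser"
        "go/token"
    )

    // hasBuildConstraint reports whether the file header contains a
    // //go:build (or legacy // +build) expression.
    func hasBuildConstraint(src string) bool {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "x.go", src, parser.PackageClauseOnly|parser.ParseComments)
        if err != nil {
            return false
        }
        for _, cg := range f.Comments {
            if cg.Pos() > f.Name.Pos() {
                continue // ignore comments after the package clause
            }
            for _, c := range cg.List {
                if _, err := constraint.Parse(c.Text); err == nil {
                    return true
                }
            }
        }
        return false
    }

    func main() {
        fmt.Println(hasBuildConstraint("//go:build skip\n\npackage a\n")) // true
        fmt.Println(hasBuildConstraint("package a\n"))                    // false
    }

Parsing with PackageClauseOnly keeps this cheap: only the header is needed to
decide whether build tags could explain why a file was excluded.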
diff --git a/gopls/internal/lsp/regtest/marker.go b/gopls/internal/lsp/regtest/marker.go index bbab4376b2e..ddd05af8c3d 100644 --- a/gopls/internal/lsp/regtest/marker.go +++ b/gopls/internal/lsp/regtest/marker.go @@ -18,6 +18,7 @@ import ( "path/filepath" "reflect" "regexp" + "runtime" "sort" "strings" "testing" @@ -105,6 +106,10 @@ var update = flag.Bool("update", false, "if set, update test data during marker // -cgo requires that CGO_ENABLED is set and the cgo tool is available // -write_sumfile=a,b,c instructs the test runner to generate go.sum files // in these directories before running the test. +// -skip_goos=a,b,c instructs the test runner to skip the test for the +// listed GOOS values. +// TODO(rfindley): using build constraint expressions for -skip_goos would +// be clearer. // TODO(rfindley): support flag values containing whitespace. // - "settings.json": this file is parsed as JSON, and used as the // session configuration (see gopls/doc/settings.md) @@ -338,6 +343,12 @@ func RunMarkerTests(t *testing.T, dir string) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + for _, goos := range test.skipGOOS { + if runtime.GOOS == goos { + t.Skipf("skipping on %s due to -skip_goos", runtime.GOOS) + } + } + // TODO(rfindley): it may be more useful to have full support for build // constraints. if test.minGoVersion != "" { @@ -361,16 +372,9 @@ func RunMarkerTests(t *testing.T, dir string) { config.Settings["diagnosticsDelay"] = "10ms" } - var writeGoSum []string - if test.writeGoSum != "" { - for _, d := range strings.Split(test.writeGoSum, ",") { - writeGoSum = append(writeGoSum, strings.TrimSpace(d)) - } - } - run := &markerTestRun{ test: test, - env: newEnv(t, cache, test.files, test.proxyFiles, writeGoSum, config), + env: newEnv(t, cache, test.files, test.proxyFiles, test.writeGoSum, config), locations: make(map[expect.Identifier]protocol.Location), diags: make(map[protocol.Location][]protocol.Diagnostic), } @@ -575,7 +579,8 @@ type markerTest struct { // Parsed flags values. minGoVersion string cgo bool - writeGoSum string // comma separated dirs to write go sum for + writeGoSum []string // comma separated dirs to write go sum for + skipGOOS []string // comma separated GOOS values to skip } // flagSet returns the flagset used for parsing the special "flags" file in the @@ -584,10 +589,27 @@ func (t *markerTest) flagSet() *flag.FlagSet { flags := flag.NewFlagSet(t.name, flag.ContinueOnError) flags.StringVar(&t.minGoVersion, "min_go", "", "if set, the minimum go1.X version required for this test") flags.BoolVar(&t.cgo, "cgo", false, "if set, requires cgo (both the cgo tool and CGO_ENABLED=1)") - flags.StringVar(&t.writeGoSum, "write_sumfile", "", "if set, write the sumfile for these directories") + flags.Var((*stringListValue)(&t.writeGoSum), "write_sumfile", "if set, write the sumfile for these directories") + flags.Var((*stringListValue)(&t.skipGOOS), "skip_goos", "if set, skip this test on these GOOS values") return flags } +// stringListValue implements flag.Value. 
+type stringListValue []string + +func (l *stringListValue) Set(s string) error { + if s != "" { + for _, d := range strings.Split(s, ",") { + *l = append(*l, strings.TrimSpace(d)) + } + } + return nil +} + +func (l stringListValue) String() string { + return strings.Join([]string(l), ",") +} + func (t *markerTest) getGolden(id string) *Golden { golden, ok := t.golden[id] // If there was no golden content for this identifier, we must create one diff --git a/gopls/internal/lsp/source/view.go b/gopls/internal/lsp/source/view.go index b2a2ebda9d5..2a16ad60676 100644 --- a/gopls/internal/lsp/source/view.go +++ b/gopls/internal/lsp/source/view.go @@ -151,7 +151,7 @@ type Snapshot interface { BuiltinFile(ctx context.Context) (*ParsedGoFile, error) // IsBuiltin reports whether uri is part of the builtin package. - IsBuiltin(ctx context.Context, uri span.URI) bool + IsBuiltin(uri span.URI) bool // CriticalError returns any critical errors in the workspace. // @@ -207,6 +207,10 @@ type Snapshot interface { // It returns an error if the context was cancelled. MetadataForFile(ctx context.Context, uri span.URI) ([]*Metadata, error) + // OrphanedFileDiagnostics reports diagnostics for files that have no package + // associations or which only have only command-line-arguments packages. + OrphanedFileDiagnostics(ctx context.Context) map[span.URI]*Diagnostic + // -- package type-checking -- // TypeCheck parses and type-checks the specified packages, @@ -534,20 +538,25 @@ type TidiedModule struct { // An ad-hoc package (without go.mod or GOPATH) has its ID, PkgPath, // and LoadDir equal to the absolute path of its directory. type Metadata struct { - ID PackageID - PkgPath PackagePath - Name PackageName + ID PackageID + PkgPath PackagePath + Name PackageName + + // these three fields are as defined by go/packages.Package GoFiles []span.URI CompiledGoFiles []span.URI - ForTest PackagePath // package path under test, or "" - TypesSizes types.Sizes - Errors []packages.Error // must be set for packages in import cycles - DepsByImpPath map[ImportPath]PackageID // may contain dups; empty ID => missing - DepsByPkgPath map[PackagePath]PackageID // values are unique and non-empty - Module *packages.Module - DepsErrors []*packagesinternal.PackageError - Diagnostics []*Diagnostic // processed diagnostics from 'go list' - LoadDir string // directory from which go/packages was run + IgnoredFiles []span.URI + + ForTest PackagePath // package path under test, or "" + TypesSizes types.Sizes + Errors []packages.Error // must be set for packages in import cycles + DepsByImpPath map[ImportPath]PackageID // may contain dups; empty ID => missing + DepsByPkgPath map[PackagePath]PackageID // values are unique and non-empty + Module *packages.Module + DepsErrors []*packagesinternal.PackageError + Diagnostics []*Diagnostic // processed diagnostics from 'go list' + LoadDir string // directory from which go/packages was run + Standalone bool // package synthesized for a standalone file (e.g. ignore-tagged) } func (m *Metadata) String() string { return string(m.ID) } diff --git a/gopls/internal/regtest/marker/testdata/diagnostics/excludedfile.txt b/gopls/internal/regtest/marker/testdata/diagnostics/excludedfile.txt new file mode 100644 index 00000000000..5944cbecb4e --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/diagnostics/excludedfile.txt @@ -0,0 +1,38 @@ +This test demonstrates diagnostics for various forms of file exclusion. 
+ +Skip on plan9, an arbitrary GOOS, so that we can exercise GOOS exclusions +resulting from file suffixes. + +-- flags -- +-min_go=go1.18 +-skip_goos=plan9 + +-- go.work -- +go 1.21 + +use ( + ./a +) +-- a/go.mod -- +module mod.com/a + +go 1.18 + +-- a/a.go -- +package a + +-- a/a_plan9.go -- +package a //@diag(re"package (a)", re"excluded due to its GOOS/GOARCH") + +-- a/a_ignored.go -- +//go:build skip +package a //@diag(re"package (a)", re"excluded due to its build tags") + +-- b/go.mod -- +module mod.com/b + +go 1.18 + +-- b/b.go -- +package b //@diag(re"package (b)", re"add this module to your go.work") + diff --git a/gopls/internal/regtest/marker/testdata/quickfix/addgowork.txt b/gopls/internal/regtest/marker/testdata/quickfix/addgowork.txt new file mode 100644 index 00000000000..dbd1a954bea --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/quickfix/addgowork.txt @@ -0,0 +1,44 @@ +This test demonstrates the quick-fix for adding a go.work file. + +TODO(rfindley): actually add quick-fixes here. +TODO(rfindley): improve the "cannot find package" import errors. + +-- flags -- +-min_go=go1.18 + +-- a/go.mod -- +module mod.com/a + +go 1.18 + +-- a/main.go -- +package main //@diag("main", re"add a go.work file") + +import "mod.com/a/lib" //@diag("\"mod.com", re"cannot find package") + +func main() { + _ = lib.C +} + +-- a/lib/lib.go -- +package lib //@diag("lib", re"add a go.work file") + +const C = "b" +-- b/go.mod -- +module mod.com/b + +go 1.18 + +-- b/main.go -- +package main //@diag("main", re"add a go.work file") + +import "mod.com/b/lib" //@diag("\"mod.com", re"cannot find package") + +func main() { + _ = lib.C +} + +-- b/lib/lib.go -- +package lib //@diag("lib", re"add a go.work file") + +const C = "b" diff --git a/gopls/internal/regtest/marker/testdata/quickfix/usemodule.txt b/gopls/internal/regtest/marker/testdata/quickfix/usemodule.txt new file mode 100644 index 00000000000..62f4efcf9c8 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/quickfix/usemodule.txt @@ -0,0 +1,49 @@ +This test demonstrates the quick-fix for using a module directory. + +TODO(rfindley): actually add quick-fixes here. + +-- flags -- +-min_go=go1.18 + +-- go.work -- +go 1.21 + +use ( + ./a +) +-- a/go.mod -- +module mod.com/a + +go 1.18 + +-- a/main.go -- +package main + +import "mod.com/a/lib" + +func main() { + _ = lib.C +} + +-- a/lib/lib.go -- +package lib + +const C = "b" +-- b/go.mod -- +module mod.com/b + +go 1.18 + +-- b/main.go -- +package main //@diag("main", re"add this module to your go.work") + +import "mod.com/b/lib" //@diag("\"mod.com", re"not included in a workspace module") + +func main() { + _ = lib.C +} + +-- b/lib/lib.go -- +package lib //@diag("lib", re"add this module to your go.work") + +const C = "b" diff --git a/gopls/internal/regtest/misc/references_test.go b/gopls/internal/regtest/misc/references_test.go index cffbd60194a..1e14f1bbbbb 100644 --- a/gopls/internal/regtest/misc/references_test.go +++ b/gopls/internal/regtest/misc/references_test.go @@ -569,7 +569,7 @@ func fileLocations(env *regtest.Env, locs []protocol.Location) []string { got := make([]string, 0, len(locs)) for _, loc := range locs { path := env.Sandbox.Workdir.URIToPath(loc.URI) // (slashified) - if i := strings.Index(path, "/src/"); i >= 0 && filepath.IsAbs(path) { + if i := strings.LastIndex(path, "/src/"); i >= 0 && filepath.IsAbs(path) { // Absolute path with "src" segment: assume it's in GOROOT. // Strip directory and don't add line/column since they are fragile. 
path = "std:" + path[i+len("/src/"):] diff --git a/gopls/internal/regtest/workspace/metadata_test.go b/gopls/internal/regtest/workspace/metadata_test.go index ff72beb0d05..cd91da8b28d 100644 --- a/gopls/internal/regtest/workspace/metadata_test.go +++ b/gopls/internal/regtest/workspace/metadata_test.go @@ -97,7 +97,7 @@ func main() {} // packages for bar.go env.RegexpReplace("bar.go", "ignore", "excluded") env.AfterChange( - Diagnostics(env.AtRegexp("bar.go", "package (main)"), WithMessage("No packages")), + Diagnostics(env.AtRegexp("bar.go", "package (main)"), WithMessage("not included in your workspace")), ) }) } From 35fe77a6b0030754d8ab52bec8e1298456957f5a Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 15 May 2023 14:24:18 -0400 Subject: [PATCH 022/109] gopls/internal/lsp/filecache: limit parallelism in Set There may be thousands of concurrent calls to Set, and each one currently results in an OS thread for I/O. Users with lower values of ulimit -u may run into thread exhaustion (EAGAIN from pthread_create). This change adds a counting semaphore to Set to limit the parallelism to 128. Although higher values do yield modest further gains according to the new benchmark, the performance of Set is not very important since it is always called asynchronously by gopls. Fixes golang/go#60089 Change-Id: I0e744f10c0ae490c74fe0a68745e6a40edc53829 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494995 Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan Reviewed-by: Robert Findley --- gopls/internal/lsp/filecache/filecache.go | 5 +++ .../internal/lsp/filecache/filecache_test.go | 32 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go index 038e309bb98..ff5d5b3952d 100644 --- a/gopls/internal/lsp/filecache/filecache.go +++ b/gopls/internal/lsp/filecache/filecache.go @@ -118,6 +118,9 @@ var ErrNotFound = fmt.Errorf("not found") // Set updates the value in the cache. func Set(kind string, key [32]byte, value []byte) error { + iolimit <- struct{}{} // acquire a token + defer func() { <-iolimit }() // release a token + name, err := filename(kind, key) if err != nil { return err @@ -157,6 +160,8 @@ func Set(kind string, key [32]byte, value []byte) error { 0600) } +var iolimit = make(chan struct{}, 128) // counting semaphore to limit I/O concurrency in Set. + var budget int64 = 1e9 // 1GB // SetBudget sets a soft limit on disk usage of the cache (in bytes) diff --git a/gopls/internal/lsp/filecache/filecache_test.go b/gopls/internal/lsp/filecache/filecache_test.go index 72e17ec375e..a078fd5cf69 100644 --- a/gopls/internal/lsp/filecache/filecache_test.go +++ b/gopls/internal/lsp/filecache/filecache_test.go @@ -218,6 +218,7 @@ func BenchmarkUncontendedGet(b *testing.B) { b.Fatal(err) } b.ResetTimer() + b.SetBytes(int64(len(value))) var group errgroup.Group group.SetLimit(50) @@ -231,3 +232,34 @@ func BenchmarkUncontendedGet(b *testing.B) { b.Fatal(err) } } + +// These two benchmarks are asymmetric: the one for Get imposes a +// modest bound on concurrency (50) whereas the one for Set imposes a +// much higher concurrency (1000) to test the implementation's +// self-imposed bound. + +func BenchmarkUncontendedSet(b *testing.B) { + const kind = "BenchmarkUncontendedSet" + key := uniqueKey() + var value [8192]byte + + const P = 1000 // parallelism + b.SetBytes(P * int64(len(value))) + + for i := 0; i < b.N; i++ { + // Perform P concurrent calls to Set. All must succeed. 
+ var group errgroup.Group + for range [P]bool{} { + group.Go(func() error { + return filecache.Set(kind, key, value[:]) + }) + } + if err := group.Wait(); err != nil { + if strings.Contains(err.Error(), "operation not supported") || + strings.Contains(err.Error(), "not implemented") { + b.Skipf("skipping: %v", err) + } + b.Fatal(err) + } + } +} From 5eb1eb932587fa6779866fa2e5a2aecc577db1b1 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 15 May 2023 14:36:54 -0400 Subject: [PATCH 023/109] gopls/internal/lsp/cache: call filecache.Set asynchronously Change-Id: Idbdba4c6aa90c608ad537d64be5dfb0afc82048e Reviewed-on: https://go-review.googlesource.com/c/tools/+/494996 Reviewed-by: Robert Findley Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot --- gopls/internal/lsp/cache/analysis.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/gopls/internal/lsp/cache/analysis.go b/gopls/internal/lsp/cache/analysis.go index 83ae05e430d..4679041b24f 100644 --- a/gopls/internal/lsp/cache/analysis.go +++ b/gopls/internal/lsp/cache/analysis.go @@ -31,6 +31,7 @@ import ( "golang.org/x/tools/gopls/internal/lsp/filecache" "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/facts" "golang.org/x/tools/internal/gcimporter" "golang.org/x/tools/internal/memoize" @@ -454,13 +455,15 @@ func analyzeImpl(ctx context.Context, snapshot *snapshot, analyzers []*analysis. if err != nil { return nil, err } - data := mustEncode(summary) - if false { - log.Printf("Set key=%d value=%d id=%s\n", len(key), len(data), id) - } - if err := filecache.Set(cacheKind, key, data); err != nil { - return nil, fmt.Errorf("internal error updating shared cache: %v", err) - } + go func() { + data := mustEncode(summary) + if false { + log.Printf("Set key=%d value=%d id=%s\n", len(key), len(data), id) + } + if err := filecache.Set(cacheKind, key, data); err != nil { + event.Error(ctx, "internal error updating analysis shared cache", err) + } + }() } // Hit or miss, we need to merge the export data from From 12a0517ad6224feef0a59a9dd176d67a23f15951 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 12 May 2023 15:32:35 -0400 Subject: [PATCH 024/109] internal/gcimporter: improve error handling This change: - updates the error message reported when the importer recovers from a panic. - updates the set of test input files to include examples of the formats used in go1.16-go1.20. - adds a recover handler to UImportData, for symmetry with IImportData. This was exposed by the new test case. - fixes an accidental shadowing bug that suppressed the bundle format version check. 
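(Illustrative only: a minimal, hypothetical sketch of the recover-to-error
pattern this change adds to UImportData; the real handler in ureader_yes.go
below additionally checks the debug flag and includes the package path in
the message. Names here are invented for the sketch.)

    package main

    import "fmt"

    // importData stands in for an importer entry point: a deferred recover
    // converts an internal panic into an error returned to the caller,
    // instead of crashing the surrounding tool.
    func importData(data []byte) (err error) {
        defer func() {
            if x := recover(); x != nil {
                err = fmt.Errorf("internal error while importing (%v); please report an issue", x)
            }
        }()
        if len(data) == 0 {
            panic("unexpected empty export data") // stands in for a decoding failure
        }
        return nil
    }

    func main() {
        fmt.Println(importData(nil))          // prints the internal-error message
        fmt.Println(importData([]byte("ok"))) // prints <nil>
    }
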
Fixes golang/go#59179 Change-Id: Ib6c20fc15e2051481fccba593607a7df0e01bc74 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494676 Reviewed-by: Robert Findley TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan gopls-CI: kokoro --- internal/gcimporter/gcimporter.go | 4 +--- internal/gcimporter/gcimporter_test.go | 10 +--------- internal/gcimporter/iimport.go | 9 +++------ internal/gcimporter/testdata/versions/test.go | 5 +---- .../gcimporter/testdata/versions/test_go1.16_i.a | Bin 0 -> 3464 bytes .../gcimporter/testdata/versions/test_go1.17_i.a | Bin 0 -> 3638 bytes .../testdata/versions/test_go1.18.5_i.a | Bin 0 -> 4162 bytes .../gcimporter/testdata/versions/test_go1.19_i.a | Bin 0 -> 4026 bytes .../gcimporter/testdata/versions/test_go1.20_u.a | Bin 0 -> 4288 bytes internal/gcimporter/ureader_yes.go | 9 +++++++++ 10 files changed, 15 insertions(+), 22 deletions(-) create mode 100644 internal/gcimporter/testdata/versions/test_go1.16_i.a create mode 100644 internal/gcimporter/testdata/versions/test_go1.17_i.a create mode 100644 internal/gcimporter/testdata/versions/test_go1.18.5_i.a create mode 100644 internal/gcimporter/testdata/versions/test_go1.19_i.a create mode 100644 internal/gcimporter/testdata/versions/test_go1.20_u.a diff --git a/internal/gcimporter/gcimporter.go b/internal/gcimporter/gcimporter.go index 5a36d0b0955..b1223713b94 100644 --- a/internal/gcimporter/gcimporter.go +++ b/internal/gcimporter/gcimporter.go @@ -230,9 +230,7 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Or, define a new standard go/types/gcexportdata package. fset := token.NewFileSet() - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. + // Select appropriate importer. 
if len(data) > 0 { switch data[0] { case 'v', 'c', 'd': // binary, till go1.10 diff --git a/internal/gcimporter/gcimporter_test.go b/internal/gcimporter/gcimporter_test.go index 33c1c1a916d..3d17e114d0d 100644 --- a/internal/gcimporter/gcimporter_test.go +++ b/internal/gcimporter/gcimporter_test.go @@ -314,14 +314,6 @@ func TestVersionHandling(t *testing.T) { // test that export data can be imported _, err := Import(make(map[string]*types.Package), pkgpath, dir, nil) if err != nil { - // ok to fail if it fails with a newer version error for select files - if strings.Contains(err.Error(), "newer version") { - switch name { - case "test_go1.11_999b.a", "test_go1.11_999i.a": - continue - } - // fall through - } t.Errorf("import %q failed: %v", pkgpath, err) continue } @@ -351,7 +343,7 @@ func TestVersionHandling(t *testing.T) { _, err = Import(make(map[string]*types.Package), pkgpath, corruptdir, nil) if err == nil { t.Errorf("import corrupted %q succeeded", pkgpath) - } else if msg := err.Error(); !strings.Contains(msg, "version skew") { + } else if msg := err.Error(); !strings.Contains(msg, "internal error") { t.Errorf("import %q error incorrect (%s)", pkgpath, msg) } } diff --git a/internal/gcimporter/iimport.go b/internal/gcimporter/iimport.go index be6dace1534..94a5eba333f 100644 --- a/internal/gcimporter/iimport.go +++ b/internal/gcimporter/iimport.go @@ -131,7 +131,7 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, } else if version > currentVersion { err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) } } }() @@ -140,11 +140,8 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, r := &intReader{bytes.NewReader(data), path} if bundle { - bundleVersion := r.uint64() - switch bundleVersion { - case bundleVersion: - default: - errorf("unknown bundle format version %d", bundleVersion) + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) } } diff --git a/internal/gcimporter/testdata/versions/test.go b/internal/gcimporter/testdata/versions/test.go index 6362adc2108..924f4447314 100644 --- a/internal/gcimporter/testdata/versions/test.go +++ b/internal/gcimporter/testdata/versions/test.go @@ -13,10 +13,7 @@ // // go build -o test_go1.$X_$Y.a test.go // -// with $X = Go version and $Y = export format version -// (add 'b' or 'i' to distinguish between binary and -// indexed format starting with 1.11 as long as both -// formats are supported). +// with $X = Go version and $Y = export format version (e.g. 'i', 'u'). // // Make sure this source is extended such that it exercises // whatever export format change has taken place. 
diff --git a/internal/gcimporter/testdata/versions/test_go1.16_i.a b/internal/gcimporter/testdata/versions/test_go1.16_i.a new file mode 100644 index 0000000000000000000000000000000000000000..35dc863e81cd71c58097b9fc92906fb095aed57e GIT binary patch literal 3464 zcmcImZ)_A*5TCs}=yk!g1Oz2U4y*{o?%lQ5yDMo3!O|imwFsyo#OvPfyY0f>?sadk z1=L^y#xMRs4EhCQNK7@T3E~Hkh!Tluh(^GOf+Pfi4;uers)>qPXLj%1U9YVW)JcCk zv%fcQ-n@Blt~<9)ww2x`Vk#wduj*`H-VyR7Qq-h8+k3`iF?Xb?skP|QBWhO2N^fL} z6H;Y+KsQ6OosGvrniZ9z@z6tyP0M6rx}Y0sNLNGiwq!e2tXw@1>s@1XYsSVFZMB*T zXF9uE*BM*>QAFo2q(MXFo`u|pFt_WD^ zXxvbFx&i=P@_G0c^;N_cKfobGPXORJV#6d44(b_`0dk0s5esSnx@rMlLmZp}aI6St zOO8nr>ys2Ko0Uzq(a=rSDBD^g%SO*59rv?LQTC9PQ*-N5zRq%w)Fg zFm;KW)S_!=WJ$W2v83zwOykYcEJ-Q2^K!PO78GVru=ff_S=l6GdUBRc;LFRHDVWLx zE)}J;3#OxEtSrm2zJhGTV^~y9YD!wi!bhTBOj?w#ytIh!cSR7%9lvIgs=|6fy@txk zPVYp?RxO@tbP4Q9eq{eq-{O^45wj2%5nYJgc&+b3{Tl+iwjGkL0Wcr+92`V)DQqTP z6p8?Vx4%+6vJRl3g7wIHfSbpVA^l{d*hD`?`ysIVd1k^_nbTb%k9s;k~s4I@(WWE zgHN37JGF3U+n~~LxNWvAym{iD&Ryz*XZkN~cxnHnv!Cob)wb2x*nVjESYh9j#~&Xm z{66i@!B0E9#LK7;AU;BzK%7CGL!3u^g}99P8F3Xs{Z)g9T8E(g%a?UzWMmsvC^U-w z3t1%GHVokZMo<1MvJreON4SbCh94&`HC||xFU2KaYVWaesc*_G#YF-}uSPjg1gcI@ z=)orm&!HRbas=+%gKRlpV(EVOj$)|}_)p%I;9{x>39+Q9DT0gZ$wc8S?XMh{v#tvj z*%iJE+S^#)n9EkjU4d+1jyoM66qkP2?jM)kFovbO)H8~u9Qcpm*c{NcBC|ixcLy>8 zO`Vg%4(Aj5qx16)5Myh`VO^~350D;5@M6`(}KB(u+o!d^A-Ovc0>^Jw^ z@1A?^x#!+9nVs4pTGGH=K9v&smUJ&z*cG4>a$0PtPM`5u%pQqE+8m31UNr+|dbJ`I z0O9j)^^Fhrh zD3&4WK~>VSIn#n(u%P4%vRDv<>tK#%8hM)$RFiNi9}e=Jx@fHF(iB~;?dJG(WD!TY zS4^=;fDjMix)&2}DNi=_uJ;aJOzedyc#=}hlwdj=9?1ypSISN_lB8)0{D0Y*$`i6pgfJ!WlWOD2Ys4G7*!~<6m8XkTfG>hE+48 z=@4>b9xQMDLNJV>I&NAhqq*ztbiY ziTg%jM^1aLCk_5T9UKO~d^AGp6kd*GV>yXY9JE8CM)tGZi1kxlFK?gy^aqp99{%Io zbg1L*A3Jv(ncMKvs~4Ua>f3&4{*#_aq=R7Z*|M<7T9__`$rFT|sC8tkL zI`QQ*Uv+HIS1;H<^l9;}Enhr3Sp2glx_Gksob}3MsqfxA-ul_;`{op-raO0jedPFt z_a84DJTv3Np^KZ3{qRvG?%~|>`g6`!#r=<(He^9pa9j zz2nI}gXs&nAs=9Z1H8U?oS62j0bbq7fNLo?fU|O-|0G}!052WB4dw%O0$z8p8`@6* zCjj3A&H;V`TmW1I3;})z)WAVc05kz61F*L35`-~FhKGkYVY&hU*B!qEG+JQw`bNv{ zABPT&wf+lebXmbSR%c+WHTp*DD!#G2TuY*FU`W}L7zsMTR6*SR(4eqL`apL%Vb=XX zyLO2*q=XENr7-1j1& zeYaB&@I>GEf?QUae&}+V2daN8jger?C>0_pqyGl$&A@Ru4Rhwf6Kb62qtWMY6#s5lwyuoFhKxZF3nGrW-F9u)b!0rYunh#afsBj^Yj1CFy9@2T-EBLD z5A+Y;Pka*|B7|r}rqSSo5Tou7P!nS`;u29515pGu0TYP8<~iT(*LAgH_#pAie&0Fw zch7gebI$kJ?euCv6FcUaJs!S!NyEardW+s{35i12_1@)l(l>TDv+$aOszvp!mBg6E zFKFv!#Uf~7m(vnd%lUFQUtwvTqe(%*C$HCpNJP@2rTU{L1wt?=@g!)$XlXngN0-j8?L5~$_j^W{lBZ}bTfhFEO&?eEJ2PCvM5VOGe|FB zAK)lp@+d;~0_JDyXaadh4k5n*L=z!>fZIkBlAT9L(O4a^4H1dwrA}T{!(l=3mxW|S zDuZ1S4@*ic$`4jQD27x~2t|9}3D|9wc2{M<7pQUxw(?51W{k3FD-c)2 zVN6mMLyIdh8GMDMu+$kBLM|sP3nPP~PllzlC2EC};t}IZ@I0kB;pvJCk`x#l3ATjr2jAX-0fP5GL8F3^R1lAhKeE>?r%|uBD z$fJOP0kDLtj85bNnGZ8k5`E+BINVEhE|} zW5ZRQ#DZt!jX88}+-by?U3NKav(O$wd+Xr*eeZu`s9!w`O z-ZL~L-H7(-Mtv_%JJ<3>Q;%}{Ch-d-&a|6NcwbzklY? 
zE%NgRyL;LmBFByvop98tQ)~C{$=&1AcejFG$cw|hSJP> zl}=+;*qF5s^b{v(jRzfDh5*xOOy_>!a3RyIv6_~lbU#=FL$7cn?rptY0veaV21s)P zpOTq;W34Sit~prA1hr^a}Gv(OI~!pK%K8x9 e*~aXd(UQY?lX6U^$?SO~$pUAR*%vcqIQb`-wP0ZY literal 0 HcmV?d00001 diff --git a/internal/gcimporter/testdata/versions/test_go1.19_i.a b/internal/gcimporter/testdata/versions/test_go1.19_i.a new file mode 100644 index 0000000000000000000000000000000000000000..ff8f5995bb8b676af52b79f36e7339f6aaca6a3c GIT binary patch literal 4026 zcmc&%Yitx%6uz@v+U%~=4Mmk2wKhYF#}y+n*X&A$ns7I!v`ARg!s7IxvA)*l z^BK(*4gNr~r2fgOB`sdLG8)aVl$+I5LCtEpsHJt;n#gpw+dXq;xm$BIOsMqMClJvq zh4q-;D#g5Q-u9%fM-pBwmQ;+G6!EIE7LDr$dfudxNQR}PUdsB%Box+^wuiV1boW3??$>U_xLC6PCHzd)2dl%001PL>BI#ss}y#K&h;n z3|F;6e^IgN{XbM~nww!sd?k^vH3fk&R+3?25p*4J9=HJ%j$&*t;LSAg1nDfs4gix~ zjCBHIM>F;baPB@6$&R?<@hB}GS&v4gSU5kT#gu%kbSkREk_pd1eN{Q4%Tgq9>Uf1z z*VdkBD4yT3*4JEBv#Oywy3VhbmJWER^U9Tur&DvdlPTTDgIvE(8DQN zF@~v@%~6uqNf=j(D#Q3%^J+@P)h)bS>ZROLnuOI6TU zD;kszjVF&zh%JbRk2&((x!Z^pN*mprk9l(AQnGotIhlU_IR^7Nfx~Yt*?)9F;GBej z$v`muAGcLIEWm}Smhvc9Ags)k#5GC z`^fRoQ_X=g7#(E-BC*{T3AMF^fe)_c?tvcok=St5ijJM z$qOt89hy-taL^*DbVSnJFIuGa`gD-n0L(~W*-)r8Bl3NKlen`uSqOa|=HYX>qkqmF@UPbVph9-flv#M^YI&Rx^u;?isTPU3gD%JXqgdS861<&lPS zJ;ld%A9*7n*B+hn+vTtJeQ-$D3E* zGHdy{4ch|We{-nxS>r;!=EwZ^NC$^vlkvN-E~Ocd2{uO z>|d{~cx}t2E5QdYj#_zzia=Wkziu3~>1bC@xd(U{F{c9eS|(tCbkLY|dmRC~fcvrV zGk{Lu)jn*8ejV5W>`KEP^l9gx1-gKX0B!oKz<0pUz%9Um4dw=N06NIF!_(W_yO~NY z-cH{F4Tj$VPCBQi{0W+NYyn^k@;hh@GgNH4BE;LhDK>fM0z<^6<7}fTHkkAwDHFPB zsC>vI1Vv(HQU}`>#PSY-w&hJU?sa~#+i0o@KCZa9%KHqup3F?B0HCRABo;jb_7L84 zOxn7^EN`ky+K^yOnPeI44-9!7J*vqN-bE&z9=i&(^HDUW(2!KBYQRQQsVIy3+iA;t ztx40YNG$J0*tQ_#O%)^=BEdA8rn42CK+&)lnS)k{H1NUQ|IPBQblaMeui-J|`#Wk6+=)=!4g55L#Nq(YPGL*1r{;y1*-X?Er{{m!vGSUD5 literal 0 HcmV?d00001 diff --git a/internal/gcimporter/testdata/versions/test_go1.20_u.a b/internal/gcimporter/testdata/versions/test_go1.20_u.a new file mode 100644 index 0000000000000000000000000000000000000000..608dba8362402551fed063aa39a5c00417dd9849 GIT binary patch literal 4288 zcmd5=Yitx%6rS1Lw%Y{+s(>QM@({4JyDhETg0w+|wxQixDn%Z~-RbUhI&^ntJGVqE}b1$NvdyNNKjfTWHA)9SUDOMj~^zi9B*Zc z#lmxV$z^c7CBwo&vt_`>&hy-O3lD+@Xt7wxfE$;|+FQs6D?eR2&^7=3gig$#gY%(B zmzrE$h{1M3k!MzkZvB7S2@@S0_TS8cP|z40O<4sdhENMU4x9%vF*OfnU|E4O%r3!8 zGK=F50rz4LZ3D(*|8xRBVh<5wiHJDcCAPZKuf6N~s!4&68W2L!V@DPUb?qI|8&ew@X;nM8?n>TDiCMwBAObE2Sjav*)kal zffi%}MF@)n*c#(%iYtKuOiYun#g&*0UtuvUw#J2!r})^BCWDj$jdCpNDkv=IQ}iAv z1UiteTsoR2moD~x@R?v{gU(kghq>tK}r@H2sfHm)CuOkB~AL~0(@k32*`0X;n+k@8qa9+K#hk>s0t z&jtKU=d;a&s2F=oubG~HrvIZI zLc!Nwk5G3y6rv-Qq$qx7A?Zf!nQF9Y>%@q&`r^)`fr@aI>)r0uy7QsddzPQeJt^ z@7(iF{n5nX9Z%GDUEO?0u6%OWw%68t)BV93@0b&wg59O7%0GO0Z+SzvJGY^A>Y|gY zHdcQ3^}e#lv{R)k-gsuy>Jz2Up5E1CS^epY9_!>!Ms?<8&)HwLb)UNB>ZA49zh10= ze%+aKzB|6iSbUBqfp!bsmk6}!Xjjr5=~m-z_%!DQRR5E5JG`cXI-=}#5I70sVBx0$ zn}L^-*bezBumjkgf<36e2Ye2k2F?RN02hHvz%?Knn`;;_3K$R2K{g$po}QkyG}X?& z^q-)?upD5cbE=m=K+}#b08CDxP9+iMeDhDmtEJx8G&vZlqJhR1B%t^h0Cl>7=n2=pD69ZEtSFb978@Sa@zxzxR|; z^kUuo;^S@AFN8Pq_lVKBSMsm0KyxCdXmeUkm5~~|9#8{gI2Id+UmF&WJ^2x- zw`6W~`l~O;`^X0yg5iwAg7$FSx7N9De`NJ_8G+?Tn+5z!z^VV^fNtT&n_l@Bh&x=; literal 0 HcmV?d00001 diff --git a/internal/gcimporter/ureader_yes.go b/internal/gcimporter/ureader_yes.go index 34fc783f82b..b977435f626 100644 --- a/internal/gcimporter/ureader_yes.go +++ b/internal/gcimporter/ureader_yes.go @@ -10,6 +10,7 @@ package gcimporter import ( + "fmt" "go/token" "go/types" "sort" @@ -63,6 +64,14 @@ type typeInfo struct { } func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) 
{ + if !debug { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) + } + }() + } + s := string(data) s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) From a13793e315cd37beaa5eead1298821a13c9ca41e Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Fri, 12 May 2023 14:22:10 -0400 Subject: [PATCH 025/109] gopls/internal/lsp: add quick-fixes to manage the go.work file Update OrphanedFileDiagnostics to provide suggested fixes for diagnostics related to modules that are not activated by a relevant go.work file. Also remove the Snapshot.openFiles method, which was completely redundant with Snapshot.overlays. Fixes golang/go#53880 Change-Id: I7e7aed97fb0b93415fe3dc383b6daea15241f31b Reviewed-on: https://go-review.googlesource.com/c/tools/+/494738 Reviewed-by: Mouffull Reviewed-by: Alan Donovan Run-TryBot: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot --- gopls/doc/commands.md | 15 + gopls/internal/lsp/cache/load.go | 2 +- gopls/internal/lsp/cache/session.go | 24 +- gopls/internal/lsp/cache/snapshot.go | 160 ++++++-- gopls/internal/lsp/cache/view.go | 13 +- gopls/internal/lsp/code_action.go | 12 + gopls/internal/lsp/command.go | 159 ++++++-- gopls/internal/lsp/command/command_gen.go | 20 + gopls/internal/lsp/command/interface.go | 10 + gopls/internal/lsp/diagnostics.go | 10 +- gopls/internal/lsp/lsp_test.go | 2 +- gopls/internal/lsp/regtest/marker.go | 2 +- gopls/internal/lsp/source/api_json.go | 6 + gopls/internal/lsp/source/view.go | 4 +- gopls/internal/lsp/workspace.go | 2 +- .../{quickfix => diagnostics}/addgowork.txt | 5 +- .../{quickfix => diagnostics}/usemodule.txt | 6 +- .../regtest/workspace/quickfix_test.go | 344 ++++++++++++++++++ 18 files changed, 717 insertions(+), 79 deletions(-) rename gopls/internal/regtest/marker/testdata/{quickfix => diagnostics}/addgowork.txt (84%) rename gopls/internal/regtest/marker/testdata/{quickfix => diagnostics}/usemodule.txt (79%) create mode 100644 gopls/internal/regtest/workspace/quickfix_test.go diff --git a/gopls/doc/commands.md b/gopls/doc/commands.md index be031e91729..8fe677b259b 100644 --- a/gopls/doc/commands.md +++ b/gopls/doc/commands.md @@ -289,6 +289,21 @@ Args: } ``` +### **run `go work [args...]`, and apply the resulting go.work** +Identifier: `gopls.run_go_work_command` + +edits to the current go.work file. + +Args: + +``` +{ + "ViewID": string, + "InitFirst": bool, + "Args": []string, +} +``` + ### **Run govulncheck.** Identifier: `gopls.run_govulncheck` diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go index 6f60c3b6b07..521dc1ee63e 100644 --- a/gopls/internal/lsp/cache/load.go +++ b/gopls/internal/lsp/cache/load.go @@ -354,7 +354,7 @@ func (s *snapshot) workspaceLayoutError(ctx context.Context) (error, []*source.D // Apply diagnostics about the workspace configuration to relevant open // files. - openFiles := s.openFiles() + openFiles := s.overlays() // If the snapshot does not have a valid build configuration, it may be // that the user has opened a directory that contains multiple modules. 
diff --git a/gopls/internal/lsp/cache/session.go b/gopls/internal/lsp/cache/session.go index 17f9bdb7bbd..eaad67c8e06 100644 --- a/gopls/internal/lsp/cache/session.go +++ b/gopls/internal/lsp/cache/session.go @@ -46,6 +46,11 @@ type Session struct { func (s *Session) ID() string { return s.id } func (s *Session) String() string { return s.id } +// GoCommandRunner returns the gocommand Runner for this session. +func (s *Session) GoCommandRunner() *gocommand.Runner { + return s.gocmdRunner +} + // Options returns a copy of the SessionOptions for this session. func (s *Session) Options() *source.Options { s.optionsMu.Lock() @@ -113,7 +118,8 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, return nil, nil, func() {}, err } - wsModFiles, wsModFilesErr := computeWorkspaceModFiles(ctx, info.gomod, info.effectiveGOWORK(), info.effectiveGO111MODULE(), s) + gowork, _ := info.GOWORK() + wsModFiles, wsModFilesErr := computeWorkspaceModFiles(ctx, info.gomod, gowork, info.effectiveGO111MODULE(), s) // We want a true background context and not a detached context here // the spans need to be unrelated and no tag values should pollute it. @@ -199,8 +205,8 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, return v, snapshot, snapshot.Acquire(), nil } -// View returns a view with a matching name, if the session has one. -func (s *Session) View(name string) *View { +// ViewByName returns a view with a matching name, if the session has one. +func (s *Session) ViewByName(name string) *View { s.viewMu.Lock() defer s.viewMu.Unlock() for _, view := range s.views { @@ -211,6 +217,18 @@ func (s *Session) View(name string) *View { return nil } +// View returns the view with a matching id, if present. +func (s *Session) View(id string) (*View, error) { + s.viewMu.Lock() + defer s.viewMu.Unlock() + for _, view := range s.views { + if view.ID() == id { + return view, nil + } + } + return nil, fmt.Errorf("no view with ID %q", id) +} + // ViewOf returns a view corresponding to the given URI. // If the file is not already associated with a view, pick one using some heuristics. func (s *Session) ViewOf(uri span.URI) (*View, error) { diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index 20353647461..45a32a142ff 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -31,6 +31,7 @@ import ( "golang.org/x/tools/go/packages" "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/gopls/internal/bug" + "golang.org/x/tools/gopls/internal/lsp/command" "golang.org/x/tools/gopls/internal/lsp/filecache" "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/gopls/internal/lsp/source" @@ -191,6 +192,16 @@ type snapshot struct { // detect ignored files. ignoreFilterOnce sync.Once ignoreFilter *ignoreFilter + + // If non-nil, the result of computing orphaned file diagnostics. + // + // Only the field, not the map itself, is guarded by the mutex. The map must + // not be mutated. + // + // Used to save work across diagnostics+code action passes. + // TODO(rfindley): refactor all of this so there's no need to re-evaluate + // diagnostics during code-action. 
+ orphanedFileDiagnostics map[span.URI]*source.Diagnostic } var globalSnapshotID uint64 @@ -293,7 +304,8 @@ func (s *snapshot) ModFiles() []span.URI { } func (s *snapshot) WorkFile() span.URI { - return s.view.effectiveGOWORK() + gowork, _ := s.view.GOWORK() + return gowork } func (s *snapshot) Templates() map[span.URI]source.FileHandle { @@ -544,7 +556,7 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat // the main (workspace) module. Otherwise, we should use the module for // the passed-in working dir. if mode == source.LoadWorkspace { - if s.view.effectiveGOWORK() == "" && s.view.gomod != "" { + if gowork, _ := s.view.GOWORK(); gowork == "" && s.view.gomod != "" { modURI = s.view.gomod } } else { @@ -929,7 +941,7 @@ func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]stru } // If GOWORK is outside the folder, ensure we are watching it. - gowork := s.view.effectiveGOWORK() + gowork, _ := s.view.GOWORK() if gowork != "" && !source.InDir(s.view.folder.Filename(), gowork.Filename()) { patterns[gowork.Filename()] = struct{}{} } @@ -1351,19 +1363,6 @@ func (s *snapshot) IsOpen(uri span.URI) bool { return open } -func (s *snapshot) openFiles() []*Overlay { - s.mu.Lock() - defer s.mu.Unlock() - - var open []*Overlay - s.files.Range(func(uri span.URI, fh source.FileHandle) { - if o, ok := fh.(*Overlay); ok { - open = append(open, o) - } - }) - return open -} - func isFileOpen(fh source.FileHandle) bool { _, open := fh.(*Overlay) return open @@ -1588,7 +1587,7 @@ func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error { // that exist only in overlays. As a workaround, we search all of the files // available in the snapshot and reload their metadata individually using a // file= query if the metadata is unavailable. - open := s.openFiles() + open := s.overlays() var files []*Overlay for _, o := range open { uri := o.URI() @@ -1686,10 +1685,26 @@ func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error { // TODO(rfindley): reconcile the definition of "orphaned" here with // reloadOrphanedFiles. The latter does not include files with // command-line-arguments packages. -func (s *snapshot) OrphanedFileDiagnostics(ctx context.Context) map[span.URI]*source.Diagnostic { +func (s *snapshot) OrphanedFileDiagnostics(ctx context.Context) (map[span.URI]*source.Diagnostic, error) { + // Orphaned file diagnostics are queried from code actions to produce + // quick-fixes (and may be queried many times, once for each file). + // + // Because they are non-trivial to compute, record them optimistically to + // avoid most redundant work. + // + // This is a hacky workaround: in the future we should avoid recomputing + // anything when codeActions provide a diagnostic: simply read the published + // diagnostic, if it exists. 
s.mu.Lock() - meta := s.meta + existing := s.orphanedFileDiagnostics s.mu.Unlock() + if existing != nil { + return existing, nil + } + + if err := s.awaitLoaded(ctx); err != nil { + return nil, err + } var files []*Overlay @@ -1699,20 +1714,30 @@ searchOverlays: if s.IsBuiltin(uri) || s.view.FileKind(o) != source.Go { continue } - for _, id := range meta.ids[o.URI()] { - if !source.IsCommandLineArguments(id) || meta.metadata[id].Standalone { + md, err := s.MetadataForFile(ctx, uri) + if err != nil { + return nil, err + } + for _, m := range md { + if !source.IsCommandLineArguments(m.ID) || m.Standalone { continue searchOverlays } } files = append(files, o) } if len(files) == 0 { - return nil + return nil, nil } - loadedModFiles := make(map[span.URI]struct{}) - ignoredFiles := make(map[span.URI]bool) - for _, meta := range meta.metadata { + loadedModFiles := make(map[span.URI]struct{}) // all mod files, including dependencies + ignoredFiles := make(map[span.URI]bool) // files reported in packages.Package.IgnoredFiles + + meta, err := s.AllMetadata(ctx) + if err != nil { + return nil, err + } + + for _, meta := range meta { if meta.Module != nil && meta.Module.GoMod != "" { gomod := span.URIFromPath(meta.Module.GoMod) loadedModFiles[gomod] = struct{}{} @@ -1740,15 +1765,25 @@ searchOverlays: continue } + var ( + msg string // if non-empty, report a diagnostic with this message + suggestedFixes []source.SuggestedFix // associated fixes, if any + ) + // If we have a relevant go.mod file, check whether the file is orphaned // due to its go.mod file being inactive. We could also offer a // prescriptive diagnostic in the case that there is no go.mod file, but it // is harder to be precise in that case, and less important. - var msg string if goMod, err := nearestModFile(ctx, fh.URI(), s); err == nil && goMod != "" { if _, ok := loadedModFiles[goMod]; !ok { modDir := filepath.Dir(goMod.Filename()) - if rel, err := filepath.Rel(s.view.folder.Filename(), modDir); err == nil { + viewDir := s.view.folder.Filename() + + // When the module is underneath the view dir, we offer + // "use all modules" quick-fixes. + inDir := source.InDir(viewDir, modDir) + + if rel, err := filepath.Rel(viewDir, modDir); err == nil { modDir = rel } @@ -1756,8 +1791,59 @@ searchOverlays: if s.view.goversion >= 18 { if s.view.gowork != "" { fix = fmt.Sprintf("To fix this problem, you can add this module to your go.work file (%s)", s.view.gowork) + if cmd, err := command.NewRunGoWorkCommandCommand("Run `go work use`", command.RunGoWorkArgs{ + ViewID: s.view.ID(), + Args: []string{"use", modDir}, + }); err == nil { + suggestedFixes = append(suggestedFixes, source.SuggestedFix{ + Title: "Use this module in your go.work file", + Command: &cmd, + ActionKind: protocol.QuickFix, + }) + } + + if inDir { + if cmd, err := command.NewRunGoWorkCommandCommand("Run `go work use -r`", command.RunGoWorkArgs{ + ViewID: s.view.ID(), + Args: []string{"use", "-r", "."}, + }); err == nil { + suggestedFixes = append(suggestedFixes, source.SuggestedFix{ + Title: "Use all modules in your workspace", + Command: &cmd, + ActionKind: protocol.QuickFix, + }) + } + } } else { fix = "To fix this problem, you can add a go.work file that uses this directory." 
+ + if cmd, err := command.NewRunGoWorkCommandCommand("Run `go work init && go work use`", command.RunGoWorkArgs{ + ViewID: s.view.ID(), + InitFirst: true, + Args: []string{"use", modDir}, + }); err == nil { + suggestedFixes = []source.SuggestedFix{ + { + Title: "Add a go.work file using this module", + Command: &cmd, + ActionKind: protocol.QuickFix, + }, + } + } + + if inDir { + if cmd, err := command.NewRunGoWorkCommandCommand("Run `go work init && go work use -r`", command.RunGoWorkArgs{ + ViewID: s.view.ID(), + InitFirst: true, + Args: []string{"use", "-r", "."}, + }); err == nil { + suggestedFixes = append(suggestedFixes, source.SuggestedFix{ + Title: "Add a go.work file using all modules in your workspace", + Command: &cmd, + ActionKind: protocol.QuickFix, + }) + } + } } } else { fix = `To work with multiple modules simultaneously, please upgrade to Go 1.18 or @@ -1794,16 +1880,22 @@ https://github.com/golang/tools/blob/master/gopls/doc/settings.md#buildflags-str if msg != "" { // Only report diagnostics if we detect an actual exclusion. diagnostics[fh.URI()] = &source.Diagnostic{ - URI: fh.URI(), - Range: rng, - Severity: protocol.SeverityWarning, - Source: source.ListError, - Message: msg, + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityWarning, + Source: source.ListError, + Message: msg, + SuggestedFixes: suggestedFixes, } } } - return diagnostics + s.mu.Lock() + defer s.mu.Unlock() + if s.orphanedFileDiagnostics == nil { // another thread may have won the race + s.orphanedFileDiagnostics = diagnostics + } + return s.orphanedFileDiagnostics, nil } // TODO(golang/go#53756): this function needs to consider more than just the @@ -1848,7 +1940,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC reinit := false wsModFiles, wsModFilesErr := s.workspaceModFiles, s.workspaceModFilesErr - if workURI := s.view.effectiveGOWORK(); workURI != "" { + if workURI, _ := s.view.GOWORK(); workURI != "" { if change, ok := changes[workURI]; ok { wsModFiles, wsModFilesErr = computeWorkspaceModFiles(ctx, s.view.gomod, workURI, s.view.effectiveGO111MODULE(), &unappliedChanges{ originalSnapshot: s, diff --git a/gopls/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go index 884b0fcda2c..db2c1dc34f0 100644 --- a/gopls/internal/lsp/cache/view.go +++ b/gopls/internal/lsp/cache/view.go @@ -145,13 +145,16 @@ func (w workspaceInformation) effectiveGO111MODULE() go111module { } } -// effectiveGOWORK returns the effective GOWORK value for this workspace, if +// GOWORK returns the effective GOWORK value for this workspace, if // any, in URI form. -func (w workspaceInformation) effectiveGOWORK() span.URI { +// +// The second result reports whether the effective GOWORK value is "" because +// GOWORK=off. +func (w workspaceInformation) GOWORK() (span.URI, bool) { if w.gowork == "off" || w.gowork == "" { - return "" + return "", w.gowork == "off" } - return span.URIFromPath(w.gowork) + return span.URIFromPath(w.gowork), false } // GO111MODULE returns the value of GO111MODULE to use for running the go @@ -540,7 +543,7 @@ func (v *View) relevantChange(c source.FileModification) bool { // // TODO(rfindley): Make sure the go.work files are always known // to the view. 
- if c.URI == v.effectiveGOWORK() { + if gowork, _ := v.GOWORK(); gowork == c.URI { return true } diff --git a/gopls/internal/lsp/code_action.go b/gopls/internal/lsp/code_action.go index 5819565407f..8658ba5588b 100644 --- a/gopls/internal/lsp/code_action.go +++ b/gopls/internal/lsp/code_action.go @@ -176,6 +176,18 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara }, }) } + + diags, err := snapshot.OrphanedFileDiagnostics(ctx) + if err != nil { + return nil, err + } + if d, ok := diags[fh.URI()]; ok { + quickFixes, err := codeActionsMatchingDiagnostics(ctx, snapshot, diagnostics, []*source.Diagnostic{d}) + if err != nil { + return nil, err + } + codeActions = append(codeActions, quickFixes...) + } } if ctx.Err() != nil { return nil, ctx.Err() diff --git a/gopls/internal/lsp/command.go b/gopls/internal/lsp/command.go index 6fa831201f4..7236087ddbd 100644 --- a/gopls/internal/lsp/command.go +++ b/gopls/internal/lsp/command.go @@ -22,6 +22,7 @@ import ( "golang.org/x/mod/modfile" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/gopls/internal/bug" "golang.org/x/tools/gopls/internal/govulncheck" "golang.org/x/tools/gopls/internal/lsp/cache" "golang.org/x/tools/gopls/internal/lsp/command" @@ -69,6 +70,7 @@ type commandConfig struct { async bool // whether to run the command asynchronously. Async commands can only return errors. requireSave bool // whether all files must be saved for the command to work progress string // title to use for progress reporting. If empty, no progress will be reported. + forView string // view to resolve to a snapshot; incompatible with forURI forURI protocol.DocumentURI // URI to resolve to a snapshot. If unset, snapshot will be nil. } @@ -103,6 +105,9 @@ func (c *commandHandler) run(ctx context.Context, cfg commandConfig, run command } } var deps commandDeps + if cfg.forURI != "" && cfg.forView != "" { + return bug.Errorf("internal error: forURI=%q, forView=%q", cfg.forURI, cfg.forView) + } if cfg.forURI != "" { var ok bool var release func() @@ -114,6 +119,17 @@ func (c *commandHandler) run(ctx context.Context, cfg commandConfig, run command } return fmt.Errorf("invalid file URL: %v", cfg.forURI) } + } else if cfg.forView != "" { + view, err := c.s.session.View(cfg.forView) + if err != nil { + return err + } + var release func() + deps.snapshot, release, err = view.Snapshot() + if err != nil { + return err + } + defer release() } ctx, cancel := context.WithCancel(xcontext.Detach(ctx)) if cfg.progress != "" { @@ -576,40 +592,26 @@ func (s *Server) runGoModUpdateCommands(ctx context.Context, snapshot source.Sna } modURI := snapshot.GoModForFile(uri) sumURI := span.URIFromPath(strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum") - modEdits, err := applyFileEdits(ctx, snapshot, modURI, newModBytes) + modEdits, err := collectFileEdits(ctx, snapshot, modURI, newModBytes) if err != nil { return err } - sumEdits, err := applyFileEdits(ctx, snapshot, sumURI, newSumBytes) + sumEdits, err := collectFileEdits(ctx, snapshot, sumURI, newSumBytes) if err != nil { return err } - changes := append(sumEdits, modEdits...) 
- if len(changes) == 0 { - return nil - } - documentChanges := []protocol.DocumentChanges{} // must be a slice - for _, change := range changes { - change := change - documentChanges = append(documentChanges, protocol.DocumentChanges{ - TextDocumentEdit: &change, - }) - } - response, err := s.client.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{ - Edit: protocol.WorkspaceEdit{ - DocumentChanges: documentChanges, - }, - }) - if err != nil { - return err - } - if !response.Applied { - return fmt.Errorf("edits not applied because of %s", response.FailureReason) - } - return nil + return applyFileEdits(ctx, s.client, append(sumEdits, modEdits...)) } -func applyFileEdits(ctx context.Context, snapshot source.Snapshot, uri span.URI, newContent []byte) ([]protocol.TextDocumentEdit, error) { +// collectFileEdits collects any file edits required to transform the snapshot +// file specified by uri to the provided new content. +// +// If the file is not open, collectFileEdits simply writes the new content to +// disk. +// +// TODO(rfindley): fix this API asymmetry. It should be up to the caller to +// write the file or apply the edits. +func collectFileEdits(ctx context.Context, snapshot source.Snapshot, uri span.URI, newContent []byte) ([]protocol.TextDocumentEdit, error) { fh, err := snapshot.ReadFile(ctx, uri) if err != nil { return nil, err @@ -618,6 +620,7 @@ func applyFileEdits(ctx context.Context, snapshot source.Snapshot, uri span.URI, if err != nil && !os.IsNotExist(err) { return nil, err } + if bytes.Equal(oldContent, newContent) { return nil, nil } @@ -647,6 +650,31 @@ func applyFileEdits(ctx context.Context, snapshot source.Snapshot, uri span.URI, }}, nil } +func applyFileEdits(ctx context.Context, cli protocol.Client, edits []protocol.TextDocumentEdit) error { + if len(edits) == 0 { + return nil + } + documentChanges := []protocol.DocumentChanges{} // must be a slice + for _, change := range edits { + change := change + documentChanges = append(documentChanges, protocol.DocumentChanges{ + TextDocumentEdit: &change, + }) + } + response, err := cli.ApplyEdit(ctx, &protocol.ApplyWorkspaceEditParams{ + Edit: protocol.WorkspaceEdit{ + DocumentChanges: documentChanges, + }, + }) + if err != nil { + return err + } + if !response.Applied { + return fmt.Errorf("edits not applied because of %s", response.FailureReason) + } + return nil +} + func runGoGetModule(invoke func(...string) (*bytes.Buffer, error), addRequire bool, args []string) error { if addRequire { if err := addModuleRequire(invoke, args); err != nil { @@ -1038,3 +1066,82 @@ func collectPackageStats(md []*source.Metadata) command.PackageStats { return stats } + +// RunGoWorkCommand invokes `go work ` with the provided arguments. +// +// args.InitFirst controls whether to first run `go work init`. This allows a +// single command to both create and recursively populate a go.work file -- as +// of writing there is no `go work init -r`. +// +// Some thought went into implementing this command. Unlike the go.mod commands +// above, this command simply invokes the go command and relies on the client +// to notify gopls of file changes via didChangeWatchedFile notifications. +// We could instead run these commands with GOWORK set to a temp file, but that +// poses the following problems: +// - directory locations in the resulting temp go.work file will be computed +// relative to the directory containing that go.work. If the go.work is in a +// tempdir, the directories will need to be translated to/from that dir. 
+// - it would be simpler to use a temp go.work file in the workspace +// directory, or whichever directory contains the real go.work file, but +// that sets a bad precedent of writing to a user-owned directory. We +// shouldn't start doing that. +// - Sending workspace edits to create a go.work file would require using +// the CreateFile resource operation, which would need to be tested in every +// client as we haven't used it before. We don't have time for that right +// now. +// +// Therefore, we simply require that the current go.work file is saved (if it +// exists), and delegate to the go command. +func (c *commandHandler) RunGoWorkCommand(ctx context.Context, args command.RunGoWorkArgs) error { + return c.run(ctx, commandConfig{ + progress: "Running go work command", + forView: args.ViewID, + }, func(ctx context.Context, deps commandDeps) (runErr error) { + snapshot := deps.snapshot + view := snapshot.View().(*cache.View) + viewDir := view.Folder().Filename() + + // If the user has explicitly set GOWORK=off, we should warn them + // explicitly and avoid potentially misleading errors below. + goworkURI, off := view.GOWORK() + if off { + return fmt.Errorf("cannot modify go.work files when GOWORK=off") + } + gowork := goworkURI.Filename() + + if goworkURI != "" { + fh, err := snapshot.ReadFile(ctx, goworkURI) + if err != nil { + return fmt.Errorf("reading current go.work file: %v", err) + } + if !fh.Saved() { + return fmt.Errorf("must save workspace file %s before running go work commands", goworkURI) + } + } else { + if !args.InitFirst { + // If go.work does not exist, we should have detected that and asked + // for InitFirst. + return bug.Errorf("internal error: cannot run go work command: required go.work file not found") + } + gowork = filepath.Join(viewDir, "go.work") + if err := c.invokeGoWork(ctx, viewDir, gowork, []string{"init"}); err != nil { + return fmt.Errorf("running `go work init`: %v", err) + } + } + + return c.invokeGoWork(ctx, viewDir, gowork, args.Args) + }) +} + +func (c *commandHandler) invokeGoWork(ctx context.Context, viewDir, gowork string, args []string) error { + inv := gocommand.Invocation{ + Verb: "work", + Args: args, + WorkingDir: viewDir, + Env: append(os.Environ(), fmt.Sprintf("GOWORK=%s", gowork)), + } + if _, err := c.s.session.GoCommandRunner().Run(ctx, inv); err != nil { + return fmt.Errorf("running go work command: %v", err) + } + return nil +} diff --git a/gopls/internal/lsp/command/command_gen.go b/gopls/internal/lsp/command/command_gen.go index a6f9940ad16..8003b17ff86 100644 --- a/gopls/internal/lsp/command/command_gen.go +++ b/gopls/internal/lsp/command/command_gen.go @@ -34,6 +34,7 @@ const ( RegenerateCgo Command = "regenerate_cgo" RemoveDependency Command = "remove_dependency" ResetGoModDiagnostics Command = "reset_go_mod_diagnostics" + RunGoWorkCommand Command = "run_go_work_command" RunGovulncheck Command = "run_govulncheck" RunTests Command = "run_tests" StartDebugging Command = "start_debugging" @@ -62,6 +63,7 @@ var Commands = []Command{ RegenerateCgo, RemoveDependency, ResetGoModDiagnostics, + RunGoWorkCommand, RunGovulncheck, RunTests, StartDebugging, @@ -162,6 +164,12 @@ func Dispatch(ctx context.Context, params *protocol.ExecuteCommandParams, s Inte return nil, err } return nil, s.ResetGoModDiagnostics(ctx, a0) + case "gopls.run_go_work_command": + var a0 RunGoWorkArgs + if err := UnmarshalArgs(params.Arguments, &a0); err != nil { + return nil, err + } + return nil, s.RunGoWorkCommand(ctx, a0) case "gopls.run_govulncheck": var a0 
VulncheckArgs if err := UnmarshalArgs(params.Arguments, &a0); err != nil { @@ -404,6 +412,18 @@ func NewResetGoModDiagnosticsCommand(title string, a0 ResetGoModDiagnosticsArgs) }, nil } +func NewRunGoWorkCommandCommand(title string, a0 RunGoWorkArgs) (protocol.Command, error) { + args, err := MarshalArgs(a0) + if err != nil { + return protocol.Command{}, err + } + return protocol.Command{ + Title: title, + Command: "gopls.run_go_work_command", + Arguments: args, + }, nil +} + func NewRunGovulncheckCommand(title string, a0 VulncheckArgs) (protocol.Command, error) { args, err := MarshalArgs(a0) if err != nil { diff --git a/gopls/internal/lsp/command/interface.go b/gopls/internal/lsp/command/interface.go index 969ed8ae242..1342e843810 100644 --- a/gopls/internal/lsp/command/interface.go +++ b/gopls/internal/lsp/command/interface.go @@ -170,6 +170,10 @@ type Interface interface { // This command is intended for internal use only, by the gopls stats // command. WorkspaceStats(context.Context) (WorkspaceStatsResult, error) + + // RunGoWorkCommand: run `go work [args...]`, and apply the resulting go.work + // edits to the current go.work file. + RunGoWorkCommand(context.Context, RunGoWorkArgs) error } type RunTestsArgs struct { @@ -447,3 +451,9 @@ type PackageStats struct { CompiledGoFiles int // total number of compiled Go files across all packages Modules int // total number of unique modules } + +type RunGoWorkArgs struct { + ViewID string // ID of the view to run the command from + InitFirst bool // Whether to run `go work init` first + Args []string // Args to pass to `go work` +} diff --git a/gopls/internal/lsp/diagnostics.go b/gopls/internal/lsp/diagnostics.go index 26f22422607..90c22321c69 100644 --- a/gopls/internal/lsp/diagnostics.go +++ b/gopls/internal/lsp/diagnostics.go @@ -391,8 +391,14 @@ func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, analyze // Orphaned files. // Confirm that every opened file belongs to a package (if any exist in // the workspace). Otherwise, add a diagnostic to the file. - for uri, diag := range snapshot.OrphanedFileDiagnostics(ctx) { - s.storeDiagnostics(snapshot, uri, orphanedSource, []*source.Diagnostic{diag}, true) + if diags, err := snapshot.OrphanedFileDiagnostics(ctx); err == nil { + for uri, diag := range diags { + s.storeDiagnostics(snapshot, uri, orphanedSource, []*source.Diagnostic{diag}, true) + } + } else { + if ctx.Err() == nil { + event.Error(ctx, "computing orphaned file diagnostics", err, source.SnapshotLabels(snapshot)...) + } } } diff --git a/gopls/internal/lsp/lsp_test.go b/gopls/internal/lsp/lsp_test.go index bbe1f1ca5f8..ed3baa20eb6 100644 --- a/gopls/internal/lsp/lsp_test.go +++ b/gopls/internal/lsp/lsp_test.go @@ -241,7 +241,7 @@ func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) func (r *runner) Diagnostics(t *testing.T, uri span.URI, want []*source.Diagnostic) { // Get the diagnostics for this view if we have not done it before. - v := r.server.session.View(r.data.Config.Dir) + v := r.server.session.ViewByName(r.data.Config.Dir) r.collectDiagnostics(v) tests.CompareDiagnostics(t, uri, want, r.diagnostics[uri]) } diff --git a/gopls/internal/lsp/regtest/marker.go b/gopls/internal/lsp/regtest/marker.go index ddd05af8c3d..29722c943d4 100644 --- a/gopls/internal/lsp/regtest/marker.go +++ b/gopls/internal/lsp/regtest/marker.go @@ -1484,7 +1484,7 @@ func codeActionMarker(mark marker, actionKind string, start, end protocol.Locati // Apply the fix it suggests. 
changed, err := codeAction(mark.run.env, loc.URI, loc.Range, actionKind, nil) if err != nil { - mark.errorf("suggestedfix failed: %v. (Use @suggestedfixerr for expected errors.)", err) + mark.errorf("codeAction failed: %v", err) return } diff --git a/gopls/internal/lsp/source/api_json.go b/gopls/internal/lsp/source/api_json.go index d6fddc995e7..281772b889a 100644 --- a/gopls/internal/lsp/source/api_json.go +++ b/gopls/internal/lsp/source/api_json.go @@ -768,6 +768,12 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "Reset diagnostics in the go.mod file of a module.", ArgDoc: "{\n\t\"URIArg\": {\n\t\t\"URI\": string,\n\t},\n\t// Optional: source of the diagnostics to reset.\n\t// If not set, all resettable go.mod diagnostics will be cleared.\n\t\"DiagnosticSource\": string,\n}", }, + { + Command: "gopls.run_go_work_command", + Title: "run `go work [args...]`, and apply the resulting go.work", + Doc: "edits to the current go.work file.", + ArgDoc: "{\n\t\"ViewID\": string,\n\t\"InitFirst\": bool,\n\t\"Args\": []string,\n}", + }, { Command: "gopls.run_govulncheck", Title: "Run govulncheck.", diff --git a/gopls/internal/lsp/source/view.go b/gopls/internal/lsp/source/view.go index 2a16ad60676..e77f9d2dec8 100644 --- a/gopls/internal/lsp/source/view.go +++ b/gopls/internal/lsp/source/view.go @@ -209,7 +209,9 @@ type Snapshot interface { // OrphanedFileDiagnostics reports diagnostics for files that have no package // associations or which only have only command-line-arguments packages. - OrphanedFileDiagnostics(ctx context.Context) map[span.URI]*Diagnostic + // + // The caller must not mutate the result. + OrphanedFileDiagnostics(ctx context.Context) (map[span.URI]*Diagnostic, error) // -- package type-checking -- diff --git a/gopls/internal/lsp/workspace.go b/gopls/internal/lsp/workspace.go index 53cdcacdaf9..818135e94a2 100644 --- a/gopls/internal/lsp/workspace.go +++ b/gopls/internal/lsp/workspace.go @@ -17,7 +17,7 @@ import ( func (s *Server) didChangeWorkspaceFolders(ctx context.Context, params *protocol.DidChangeWorkspaceFoldersParams) error { event := params.Event for _, folder := range event.Removed { - view := s.session.View(folder.Name) + view := s.session.ViewByName(folder.Name) if view != nil { s.session.RemoveView(view) } else { diff --git a/gopls/internal/regtest/marker/testdata/quickfix/addgowork.txt b/gopls/internal/regtest/marker/testdata/diagnostics/addgowork.txt similarity index 84% rename from gopls/internal/regtest/marker/testdata/quickfix/addgowork.txt rename to gopls/internal/regtest/marker/testdata/diagnostics/addgowork.txt index dbd1a954bea..2cb7d2bf81b 100644 --- a/gopls/internal/regtest/marker/testdata/quickfix/addgowork.txt +++ b/gopls/internal/regtest/marker/testdata/diagnostics/addgowork.txt @@ -1,6 +1,7 @@ -This test demonstrates the quick-fix for adding a go.work file. +This test demonstrates diagnostics for adding a go.work file. + +Quick-fixes change files on disk, so are tested by regtests. -TODO(rfindley): actually add quick-fixes here. TODO(rfindley): improve the "cannot find package" import errors. 
-- flags -- diff --git a/gopls/internal/regtest/marker/testdata/quickfix/usemodule.txt b/gopls/internal/regtest/marker/testdata/diagnostics/usemodule.txt similarity index 79% rename from gopls/internal/regtest/marker/testdata/quickfix/usemodule.txt rename to gopls/internal/regtest/marker/testdata/diagnostics/usemodule.txt index 62f4efcf9c8..35d2e43bf23 100644 --- a/gopls/internal/regtest/marker/testdata/quickfix/usemodule.txt +++ b/gopls/internal/regtest/marker/testdata/diagnostics/usemodule.txt @@ -1,6 +1,7 @@ -This test demonstrates the quick-fix for using a module directory. +This test demonstrates diagnostics for a module that is missing from the +go.work file. -TODO(rfindley): actually add quick-fixes here. +Quick-fixes change files on disk, so are tested by regtests. -- flags -- -min_go=go1.18 @@ -11,6 +12,7 @@ go 1.21 use ( ./a ) + -- a/go.mod -- module mod.com/a diff --git a/gopls/internal/regtest/workspace/quickfix_test.go b/gopls/internal/regtest/workspace/quickfix_test.go new file mode 100644 index 00000000000..5cb08f06480 --- /dev/null +++ b/gopls/internal/regtest/workspace/quickfix_test.go @@ -0,0 +1,344 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package workspace + +import ( + "fmt" + "strings" + "testing" + + "golang.org/x/tools/gopls/internal/lsp/protocol" + "golang.org/x/tools/gopls/internal/lsp/tests/compare" + "golang.org/x/tools/internal/testenv" + + . "golang.org/x/tools/gopls/internal/lsp/regtest" +) + +func TestQuickFix_UseModule(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // needs go.work + + const files = ` +-- go.work -- +go 1.20 + +use ( + ./a +) +-- a/go.mod -- +module mod.com/a + +go 1.18 + +-- a/main.go -- +package main + +import "mod.com/a/lib" + +func main() { + _ = lib.C +} + +-- a/lib/lib.go -- +package lib + +const C = "b" +-- b/go.mod -- +module mod.com/b + +go 1.18 + +-- b/main.go -- +package main + +import "mod.com/b/lib" + +func main() { + _ = lib.C +} + +-- b/lib/lib.go -- +package lib + +const C = "b" +` + + for _, title := range []string{ + "Use this module", + "Use all modules", + } { + t.Run(title, func(t *testing.T) { + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("b/main.go") + var d protocol.PublishDiagnosticsParams + env.AfterChange(ReadDiagnostics("b/main.go", &d)) + fixes := env.GetQuickFixes("b/main.go", d.Diagnostics) + var toApply []protocol.CodeAction + for _, fix := range fixes { + if strings.Contains(fix.Title, title) { + toApply = append(toApply, fix) + } + } + if len(toApply) != 1 { + t.Fatalf("codeAction: got %d quick fixes matching %q, want 1; got: %v", len(toApply), title, toApply) + } + env.ApplyCodeAction(toApply[0]) + env.AfterChange(NoDiagnostics()) + want := `go 1.20 + +use ( + ./a + ./b +) +` + got := env.ReadWorkspaceFile("go.work") + if diff := compare.Text(want, got); diff != "" { + t.Errorf("unexpeced go.work content:\n%s", diff) + } + }) + }) + } +} + +func TestQuickFix_AddGoWork(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // needs go.work + + v := goVersion(t) + const files = ` +-- a/go.mod -- +module mod.com/a + +go 1.18 + +-- a/main.go -- +package main + +import "mod.com/a/lib" + +func main() { + _ = lib.C +} + +-- a/lib/lib.go -- +package lib + +const C = "b" +-- b/go.mod -- +module mod.com/b + +go 1.18 + +-- b/main.go -- +package main + +import "mod.com/b/lib" + +func main() { + _ = lib.C +} + +-- b/lib/lib.go -- +package lib + +const C = "b" +` + + tests := []struct { + 
name string + file string + title string + want string + }{ + { + "use b", + "b/main.go", + "Add a go.work file using this module", + fmt.Sprintf(`go 1.%d + +use ./b +`, v), + }, + { + "use a", + "a/main.go", + "Add a go.work file using this module", + fmt.Sprintf(`go 1.%d + +use ./a +`, v), + }, + { + "use all", + "a/main.go", + "Add a go.work file using all modules", + fmt.Sprintf(`go 1.%d + +use ( + ./a + ./b +) +`, v), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile(test.file) + var d protocol.PublishDiagnosticsParams + env.AfterChange(ReadDiagnostics(test.file, &d)) + fixes := env.GetQuickFixes(test.file, d.Diagnostics) + var toApply []protocol.CodeAction + for _, fix := range fixes { + if strings.Contains(fix.Title, test.title) { + toApply = append(toApply, fix) + } + } + if len(toApply) != 1 { + t.Fatalf("codeAction: got %d quick fixes matching %q, want 1; got: %v", len(toApply), test.title, toApply) + } + env.ApplyCodeAction(toApply[0]) + env.AfterChange( + NoDiagnostics(ForFile(test.file)), + ) + + got := env.ReadWorkspaceFile("go.work") + if diff := compare.Text(test.want, got); diff != "" { + t.Errorf("unexpected go.work content:\n%s", diff) + } + }) + }) + } +} + +func TestQuickFix_UnsavedGoWork(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // needs go.work + + const files = ` +-- go.work -- +go 1.21 + +use ( + ./a +) +-- a/go.mod -- +module mod.com/a + +go 1.18 + +-- a/main.go -- +package main + +func main() {} +-- b/go.mod -- +module mod.com/b + +go 1.18 + +-- b/main.go -- +package main + +func main() {} +` + + for _, title := range []string{ + "Use this module", + "Use all modules", + } { + t.Run(title, func(t *testing.T) { + Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("go.work") + env.OpenFile("b/main.go") + env.RegexpReplace("go.work", "go 1.21", "go 1.21 // arbitrary comment") + var d protocol.PublishDiagnosticsParams + env.AfterChange(ReadDiagnostics("b/main.go", &d)) + fixes := env.GetQuickFixes("b/main.go", d.Diagnostics) + var toApply []protocol.CodeAction + for _, fix := range fixes { + if strings.Contains(fix.Title, title) { + toApply = append(toApply, fix) + } + } + if len(toApply) != 1 { + t.Fatalf("codeAction: got %d quick fixes matching %q, want 1; got: %v", len(toApply), title, toApply) + } + fix := toApply[0] + err := env.Editor.ApplyCodeAction(env.Ctx, fix) + if err == nil { + t.Fatalf("codeAction(%q) succeeded unexpectedly", fix.Title) + } + + if got := err.Error(); !strings.Contains(got, "must save") { + t.Errorf("codeAction(%q) returned error %q, want containing \"must save\"", fix.Title, err) + } + }) + }) + } +} + +func TestQuickFix_GOWORKOff(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // needs go.work + + const files = ` +-- go.work -- +go 1.21 + +use ( + ./a +) +-- a/go.mod -- +module mod.com/a + +go 1.18 + +-- a/main.go -- +package main + +func main() {} +-- b/go.mod -- +module mod.com/b + +go 1.18 + +-- b/main.go -- +package main + +func main() {} +` + + for _, title := range []string{ + "Use this module", + "Use all modules", + } { + t.Run(title, func(t *testing.T) { + WithOptions( + EnvVars{"GOWORK": "off"}, + ).Run(t, files, func(t *testing.T, env *Env) { + env.OpenFile("go.work") + env.OpenFile("b/main.go") + var d protocol.PublishDiagnosticsParams + env.AfterChange(ReadDiagnostics("b/main.go", &d)) + fixes := env.GetQuickFixes("b/main.go", d.Diagnostics) + var toApply []protocol.CodeAction + for _, fix := range fixes { + if 
strings.Contains(fix.Title, title) { + toApply = append(toApply, fix) + } + } + if len(toApply) != 1 { + t.Fatalf("codeAction: got %d quick fixes matching %q, want 1; got: %v", len(toApply), title, toApply) + } + fix := toApply[0] + err := env.Editor.ApplyCodeAction(env.Ctx, fix) + if err == nil { + t.Fatalf("codeAction(%q) succeeded unexpectedly", fix.Title) + } + + if got := err.Error(); !strings.Contains(got, "GOWORK=off") { + t.Errorf("codeAction(%q) returned error %q, want containing \"GOWORK=off\"", fix.Title, err) + } + }) + }) + } +} From 7f203f0c38db456577f2a121e37cfa97a0a0557d Mon Sep 17 00:00:00 2001 From: Tim King Date: Thu, 11 May 2023 16:38:37 -0700 Subject: [PATCH 026/109] go/ssa: consolidate use of underlying pointer Consolidates the use of typ.Underlying().(*types.Pointer) to the deptr function. This function replace isPointer and the old deref function. Both have been replaced. This allows for tracking where underlying pointers are used. Follow up CLs will try to move away from using underlying pointers when necessary. Switches deptr for mustDeref in obviously safe locations (alloc.Type()). Adds a new deref function that uses the core type instead of the underlying type. Change-Id: Id51f95e87cb40a13d43fd595d1f20b21b8325eeb Reviewed-on: https://go-review.googlesource.com/c/tools/+/494976 TryBot-Result: Gopher Robot Run-TryBot: Tim King gopls-CI: kokoro Reviewed-by: Alan Donovan --- go/ssa/builder.go | 49 +++++++++++++++++++++------------- go/ssa/builder_generic_test.go | 36 ++++++++++++++++--------- go/ssa/emit.go | 21 ++++++++++----- go/ssa/func.go | 2 +- go/ssa/lift.go | 4 +-- go/ssa/methods.go | 5 +++- go/ssa/print.go | 7 ++--- go/ssa/subst.go | 4 +-- go/ssa/util.go | 39 ++++++++++++++------------- go/ssa/wrappers.go | 10 ++++--- 10 files changed, 108 insertions(+), 69 deletions(-) diff --git a/go/ssa/builder.go b/go/ssa/builder.go index ffa666732fe..3ad4f391bdc 100644 --- a/go/ssa/builder.go +++ b/go/ssa/builder.go @@ -363,7 +363,7 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ } case "new": - alloc := emitNew(fn, deref(typ), pos) + alloc := emitNew(fn, mustDeref(typ), pos) alloc.Comment = "new" return alloc @@ -375,8 +375,8 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ // been constant-folded.) // // Type parameters are always non-constant so use Underlying. 
- t := deref(fn.typeOf(args[0])).Underlying() - if at, ok := t.(*types.Array); ok { + t, _ := deptr(fn.typeOf(args[0])) + if at, ok := t.Underlying().(*types.Array); ok { b.expr(fn, args[0]) // for effects only return intConst(at.Len()) } @@ -431,7 +431,7 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { return &address{addr: v, pos: e.Pos(), expr: e} case *ast.CompositeLit: - t := deref(fn.typeOf(e)) + t, _ := deptr(fn.typeOf(e)) var v *Alloc if escaping { v = emitNew(fn, t, e.Lbrace) @@ -459,7 +459,8 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { wantAddr := true v := b.receiver(fn, e.X, wantAddr, escaping, sel) index := sel.index[len(sel.index)-1] - fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index) + dt, _ := deptr(v.Type()) + fld := typeparams.CoreType(dt).(*types.Struct).Field(index) // Due to the two phases of resolving AssignStmt, a panic from x.f = p() // when x is nil is required to come after the side-effects of @@ -508,7 +509,7 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { v.setType(et) return fn.emit(v) } - return &lazyAddress{addr: emit, t: deref(et), pos: e.Lbrack, expr: e} + return &lazyAddress{addr: emit, t: mustDeref(et), pos: e.Lbrack, expr: e} case *ast.StarExpr: return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e} @@ -554,7 +555,7 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * // so if the type of the location is a pointer, // an &-operation is implied. if _, ok := loc.(blank); !ok { // avoid calling blank.typ() - if isPointer(loc.typ()) { + if _, ok := deptr(loc.typ()); ok { ptr := b.addr(fn, e, true).address(fn) // copy address if sb != nil { @@ -584,7 +585,7 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * // Subtle: emit debug ref for aggregate types only; // slice and map are handled by store ops in compLit. - switch loc.typ().Underlying().(type) { + switch loc.typ().Underlying().(type) { // TODO(taking): check if Underlying() appropriate. case *types.Struct, *types.Array: emitDebugRef(fn, e, addr, true) } @@ -831,7 +832,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { // The result is a "bound". obj := sel.obj.(*types.Func) rt := fn.typ(recvType(obj)) - wantAddr := isPointer(rt) + _, wantAddr := deptr(rt) escaping := true v := b.receiver(fn, e.X, wantAddr, escaping, sel) @@ -958,8 +959,9 @@ func (b *builder) stmtList(fn *Function, list []ast.Stmt) { // // escaping is defined as per builder.addr(). func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *selection) Value { + var v Value - if wantAddr && !sel.indirect && !isPointer(fn.typeOf(e)) { + if _, eptr := deptr(fn.typeOf(e)); wantAddr && !sel.indirect && !eptr { v = b.addr(fn, e, escaping).address(fn) } else { v = b.expr(fn, e) @@ -968,7 +970,7 @@ func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, se last := len(sel.index) - 1 // The position of implicit selection is the position of the inducing receiver expression. 
v = emitImplicitSelections(fn, v, sel.index[:last], e.Pos()) - if !wantAddr && isPointer(v.Type()) { + if _, vptr := deptr(v.Type()); !wantAddr && vptr { v = emitLoad(fn, v) } return v @@ -987,7 +989,7 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { obj := sel.obj.(*types.Func) recv := recvType(obj) - wantAddr := isPointer(recv) + _, wantAddr := deptr(recv) escaping := true v := b.receiver(fn, selector.X, wantAddr, escaping, sel) if types.IsInterface(recv) { @@ -1253,8 +1255,10 @@ func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { // literal has type *T behaves like &T{}. // In that case, addr must hold a T, not a *T. func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { - typ := deref(fn.typeOf(e)) // type with name [may be type param] - t := deref(typeparams.CoreType(typ)).Underlying() // core type for comp lit case + typ, _ := deptr(fn.typeOf(e)) // type with name [may be type param] + t, _ := deptr(typeparams.CoreType(typ)) // core type for comp lit case + t = t.Underlying() + // Computing typ and t is subtle as these handle pointer types. // For example, &T{...} is valid even for maps and slices. // Also typ should refer to T (not *T) while t should be the core type of T. @@ -1282,7 +1286,8 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero case *types.Struct: if !isZero && len(e.Elts) != t.NumFields() { // memclear - sb.store(&address{addr, e.Lbrace, nil}, zeroConst(deref(addr.Type()))) + dt, _ := deptr(addr.Type()) + sb.store(&address{addr, e.Lbrace, nil}, zeroConst(dt)) isZero = true } for i, e := range e.Elts { @@ -1326,7 +1331,8 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero if !isZero && int64(len(e.Elts)) != at.Len() { // memclear - sb.store(&address{array, e.Lbrace, nil}, zeroConst(deref(array.Type()))) + dt, _ := deptr(array.Type()) + sb.store(&address{array, e.Lbrace, nil}, zeroConst(dt)) } } @@ -1379,8 +1385,13 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero // map[*struct{}]bool{{}: true} // An &-operation may be implied: // map[*struct{}]bool{&struct{}{}: true} + wantAddr := false + if _, ok := unparen(e.Key).(*ast.CompositeLit); ok { + _, wantAddr = t.Key().Underlying().(*types.Pointer) + } + var key Value - if _, ok := unparen(e.Key).(*ast.CompositeLit); ok && isPointer(t.Key()) { + if wantAddr { // A CompositeLit never evaluates to a pointer, // so if the type of the location is a pointer, // an &-operation is implied. @@ -1873,7 +1884,8 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P // Determine number of iterations. var length Value - if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok { + dt, _ := deptr(x.Type()) + if arr, ok := dt.Underlying().(*types.Array); ok { // For array or *array, the number of iterations is // known statically thanks to the type. We avoid a // data dependence upon x, permitting later dead-code @@ -1882,6 +1894,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P // We still generate code for x, in case it has effects. // // TypeParams do not have constant length. Use underlying instead of core type. + // TODO: check if needed. length = intConst(arr.Len()) } else { // length = len(x). 
diff --git a/go/ssa/builder_generic_test.go b/go/ssa/builder_generic_test.go index c5543e37779..4c3ac171a3b 100644 --- a/go/ssa/builder_generic_test.go +++ b/go/ssa/builder_generic_test.go @@ -5,6 +5,7 @@ package ssa_test import ( + "bytes" "fmt" "go/parser" "go/token" @@ -540,30 +541,30 @@ func TestInstructionString(t *testing.T) { const contents = ` package p - //@ instrs("f", "*ssa.TypeAssert") - //@ instrs("f", "*ssa.Call", "print(nil:interface{}, 0:int)") - func f(x int) { // non-generic smoke test. + //@ instrs("f0", "*ssa.TypeAssert") + //@ instrs("f0", "*ssa.Call", "print(nil:interface{}, 0:int)") + func f0(x int) { // non-generic smoke test. var i interface{} print(i, 0) } - //@ instrs("h", "*ssa.Alloc", "local T (u)") - //@ instrs("h", "*ssa.FieldAddr", "&t0.x [#0]") - func h[T ~struct{ x string }]() T { + //@ instrs("f1", "*ssa.Alloc", "local T (u)") + //@ instrs("f1", "*ssa.FieldAddr", "&t0.x [#0]") + func f1[T ~struct{ x string }]() T { u := T{"lorem"} return u } - //@ instrs("c", "*ssa.TypeAssert", "typeassert t0.(interface{})") - //@ instrs("c", "*ssa.Call", "invoke x.foo()") - func c[T interface{ foo() string }](x T) { + //@ instrs("f2", "*ssa.TypeAssert", "typeassert t0.(interface{})") + //@ instrs("f2", "*ssa.Call", "invoke x.foo()") + func f2[T interface{ foo() string }](x T) { _ = x.foo _ = x.foo() } - //@ instrs("d", "*ssa.TypeAssert", "typeassert t0.(interface{})") - //@ instrs("d", "*ssa.Call", "invoke x.foo()") - func d[T interface{ foo() string; comparable }](x T) { + //@ instrs("f3", "*ssa.TypeAssert", "typeassert t0.(interface{})") + //@ instrs("f3", "*ssa.Call", "invoke x.foo()") + func f3[T interface{ foo() string; comparable }](x T) { _ = x.foo _ = x.foo() } @@ -643,7 +644,8 @@ func TestInstructionString(t *testing.T) { // Check each expectation. for key, value := range expectations { - if _, ok := p.Members[key.function]; !ok { + fn, ok := p.Members[key.function].(*ssa.Function) + if !ok { t.Errorf("Expectation on %s does not match a member in %s", key.function, p.Pkg.Name()) } got, want := value.matches, value.wants @@ -651,6 +653,7 @@ func TestInstructionString(t *testing.T) { sort.Strings(want) if !reflect.DeepEqual(want, got) { t.Errorf("Within %s wanted instructions of kind %s: %q. got %q", key.function, key.kind, want, got) + logFunction(t, fn) } } } @@ -664,3 +667,10 @@ func packageName(t testing.TB, content string) string { } return f.Name.Name } + +func logFunction(t testing.TB, fn *ssa.Function) { + // TODO: Consider adding a ssa.Function.GoString() so this can be logged to t via '%#v'. + var buf bytes.Buffer + ssa.WriteFunction(&buf, fn) + t.Log(buf.String()) +} diff --git a/go/ssa/emit.go b/go/ssa/emit.go index d3ba3e0c34c..d402e676b20 100644 --- a/go/ssa/emit.go +++ b/go/ssa/emit.go @@ -29,7 +29,7 @@ func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc { // new temporary, and returns the value so defined. func emitLoad(f *Function, addr Value) *UnOp { v := &UnOp{Op: token.MUL, X: addr} - v.setType(deref(typeparams.CoreType(addr.Type()))) + v.setType(mustDeref(addr.Type())) f.emit(v) return v } @@ -478,9 +478,9 @@ func emitTailCall(f *Function, call *Call) { // value of a field. 
func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) Value { for _, index := range indices { - fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index) - - if isPointer(v.Type()) { + st, vptr := deptr(v.Type()) + fld := typeparams.CoreType(st).(*types.Struct).Field(index) + if vptr { instr := &FieldAddr{ X: v, Field: index, @@ -489,7 +489,7 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) instr.setType(types.NewPointer(fld.Type())) v = f.emit(instr) // Load the field's value iff indirectly embedded. - if isPointer(fld.Type()) { + if _, fldptr := deptr(fld.Type()); fldptr { v = emitLoad(f, v) } } else { @@ -512,8 +512,15 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) // field's value. // Ident id is used for position and debug info. func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { - fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index) - if isPointer(v.Type()) { + // TODO(taking): Cover the following cases of interest + // func f[T any, S struct{f T}, P *struct{f T}, PS *S](x T) { + // _ := S{f: x} + // _ := P{f: x} + // _ := PS{f: x} + // } + st, vptr := deptr(v.Type()) + fld := typeparams.CoreType(st).(*types.Struct).Field(index) + if vptr { instr := &FieldAddr{ X: v, Field: index, diff --git a/go/ssa/func.go b/go/ssa/func.go index 71f1264e850..60cf53f6003 100644 --- a/go/ssa/func.go +++ b/go/ssa/func.go @@ -596,7 +596,7 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { if len(f.Locals) > 0 { buf.WriteString("# Locals:\n") for i, l := range f.Locals { - fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from)) + fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(mustDeref(l.Type()), from)) } } writeSignature(buf, from, f.Name(), f.Signature, f.Params) diff --git a/go/ssa/lift.go b/go/ssa/lift.go index b9cf7bc3e68..dbd8790c6f5 100644 --- a/go/ssa/lift.go +++ b/go/ssa/lift.go @@ -460,7 +460,7 @@ func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool *fresh++ phi.pos = alloc.Pos() - phi.setType(deref(alloc.Type())) + phi.setType(mustDeref(alloc.Type())) phi.block = v if debugLifting { fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v) @@ -505,7 +505,7 @@ func replaceAll(x, y Value) { func renamed(renaming []Value, alloc *Alloc) Value { v := renaming[alloc.index] if v == nil { - v = zeroConst(deref(alloc.Type())) + v = zeroConst(mustDeref(alloc.Type())) renaming[alloc.index] = v } return v diff --git a/go/ssa/methods.go b/go/ssa/methods.go index 4185618cdd6..29449837138 100644 --- a/go/ssa/methods.go +++ b/go/ssa/methods.go @@ -101,8 +101,11 @@ func (prog *Program) addMethod(mset *methodSet, sel *types.Selection, cr *creato sel := toSelection(sel) obj := sel.obj.(*types.Func) + _, ptrObj := deptr(recvType(obj)) + _, ptrRecv := deptr(sel.recv) + needsPromotion := len(sel.index) > 1 - needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.recv) + needsIndirection := !ptrObj && ptrRecv if needsPromotion || needsIndirection { fn = makeWrapper(prog, sel, cr) } else { diff --git a/go/ssa/print.go b/go/ssa/print.go index 8b783196e49..e47e51637d4 100644 --- a/go/ssa/print.go +++ b/go/ssa/print.go @@ -95,7 +95,7 @@ func (v *Alloc) String() string { op = "new" } from := v.Parent().relPkg() - return fmt.Sprintf("%s %s (%s)", op, relType(deref(v.Type()), from), v.Comment) + return fmt.Sprintf("%s %s (%s)", op, 
relType(mustDeref(v.Type()), from), v.Comment) } func (v *Phi) String() string { @@ -259,7 +259,8 @@ func (v *MakeChan) String() string { } func (v *FieldAddr) String() string { - st := typeparams.CoreType(deref(v.X.Type())).(*types.Struct) + dt, _ := deptr(v.X.Type()) + st := typeparams.CoreType(dt).(*types.Struct) // Be robust against a bad index. name := "?" if 0 <= v.Field && v.Field < st.NumFields() { @@ -452,7 +453,7 @@ func WritePackage(buf *bytes.Buffer, p *Package) { case *Global: fmt.Fprintf(buf, " var %-*s %s\n", - maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from)) + maxname, name, relType(mustDeref(mem.Type()), from)) } } diff --git a/go/ssa/subst.go b/go/ssa/subst.go index 7efab3578a3..89c41a8d4c1 100644 --- a/go/ssa/subst.go +++ b/go/ssa/subst.go @@ -249,7 +249,7 @@ func (subst *subster) interface_(iface *types.Interface) *types.Interface { } // methods for the interface. Initially nil if there is no known change needed. - // Signatures for the method where recv is nil. NewInterfaceType fills in the recievers. + // Signatures for the method where recv is nil. NewInterfaceType fills in the receivers. var methods []*types.Func initMethods := func(n int) { // copy first n explicit methods methods = make([]*types.Func, iface.NumExplicitMethods()) @@ -262,7 +262,7 @@ func (subst *subster) interface_(iface *types.Interface) *types.Interface { for i := 0; i < iface.NumExplicitMethods(); i++ { f := iface.ExplicitMethod(i) // On interfaces, we need to cycle break on anonymous interface types - // being in a cycle with their signatures being in cycles with their recievers + // being in a cycle with their signatures being in cycles with their receivers // that do not go through a Named. norecv := changeRecv(f.Type().(*types.Signature), nil) sig := subst.typ(norecv) diff --git a/go/ssa/util.go b/go/ssa/util.go index e532e1e12b5..53a74877bcb 100644 --- a/go/ssa/util.go +++ b/go/ssa/util.go @@ -43,12 +43,6 @@ func isBlankIdent(e ast.Expr) bool { //// Type utilities. Some of these belong in go/types. -// isPointer returns true for types whose underlying type is a pointer. -func isPointer(typ types.Type) bool { - _, ok := typ.Underlying().(*types.Pointer) - return ok -} - // isNonTypeParamInterface reports whether t is an interface type but not a type parameter. func isNonTypeParamInterface(t types.Type) bool { return !typeparams.IsTypeParam(t) && types.IsInterface(t) @@ -100,20 +94,31 @@ func isBasicConvTypes(tset termList) bool { return all && basics >= 1 && tset.Len()-basics <= 1 } -// deref returns a pointer's element type; otherwise it returns typ. -func deref(typ types.Type) types.Type { +// deptr returns a pointer's element type and true; otherwise it returns (typ, false). +// This function is oblivious to core types and is not suitable for generics. +// +// TODO: Deprecate this function once all usages have been audited. +func deptr(typ types.Type) (types.Type, bool) { if p, ok := typ.Underlying().(*types.Pointer); ok { - return p.Elem() + return p.Elem(), true } - return typ + return typ, false +} + +// deref returns the element type of a type with a pointer core type and true; +// otherwise it returns (typ, false). +func deref(typ types.Type) (types.Type, bool) { + if p, ok := typeparams.CoreType(typ).(*types.Pointer); ok { + return p.Elem(), true + } + return typ, false } // mustDeref returns the element type of a type with a pointer core type. // Panics on failure. 
func mustDeref(typ types.Type) types.Type { - // TODO(taking): Replace deref with mustDeref when possible. - if p, ok := typeparams.CoreType(typ).(*types.Pointer); ok { - return p.Elem() + if et, ok := deref(typ); ok { + return et } panic("cannot dereference type " + typ.String()) } @@ -182,16 +187,14 @@ func nonbasicTypes(ts []types.Type) []types.Type { return filtered } -// receiverTypeArgs returns the type arguments to a function's reciever. -// Returns an empty list if obj does not have a reciever or its reciever does not have type arguments. +// receiverTypeArgs returns the type arguments to a function's receiver. +// Returns an empty list if obj does not have a receiver or its receiver does not have type arguments. func receiverTypeArgs(obj *types.Func) []types.Type { rtype := recvType(obj) if rtype == nil { return nil } - if isPointer(rtype) { - rtype = rtype.(*types.Pointer).Elem() - } + rtype, _ = deptr(rtype) named, ok := rtype.(*types.Named) if !ok { return nil diff --git a/go/ssa/wrappers.go b/go/ssa/wrappers.go index 228daf6158a..123ea6858aa 100644 --- a/go/ssa/wrappers.go +++ b/go/ssa/wrappers.go @@ -82,12 +82,14 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { indices := sel.index var v Value = fn.Locals[0] // spilled receiver - if isPointer(sel.recv) { + srdt, ptrRecv := deptr(sel.recv) + if ptrRecv { v = emitLoad(fn, v) // For simple indirection wrappers, perform an informative nil-check: // "value method (T).f called using nil *T pointer" - if len(indices) == 1 && !isPointer(recvType(obj)) { + _, ptrObj := deptr(recvType(obj)) + if len(indices) == 1 && !ptrObj { var c Call c.Call.Value = &Builtin{ name: "ssa:wrapnilchk", @@ -97,7 +99,7 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { } c.Call.Args = []Value{ v, - stringConst(deref(sel.recv).String()), + stringConst(srdt.String()), stringConst(sel.obj.Name()), } c.setType(v.Type()) @@ -121,7 +123,7 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { var c Call if r := recvType(obj); !types.IsInterface(r) { // concrete method - if !isPointer(r) { + if _, ptrObj := deptr(r); !ptrObj { v = emitLoad(fn, v) } callee := prog.originFunc(obj) From 9dcd3d5dc8a4c0f7078858978e5d9243136e66da Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 15 May 2023 17:58:47 -0400 Subject: [PATCH 027/109] gopls/internal/lsp/filecache: limit Get parallelism too This change causes Get to be subject to the same concurrency-limiting semaphore just added to Set, as a user (predictably) encountered thread exhaustion in this operation too. Oddly, the Get benchmark doesn't seem to be limited by the value of 128, or even smaller values. Updates golang/go#60089 Change-Id: Ie4632cbc4cdd6536558b2067a3d115cf4fa17fae Reviewed-on: https://go-review.googlesource.com/c/tools/+/495055 Run-TryBot: Alan Donovan gopls-CI: kokoro Reviewed-by: Robert Findley TryBot-Result: Gopher Robot --- gopls/internal/lsp/filecache/filecache.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go index ff5d5b3952d..9433245288c 100644 --- a/gopls/internal/lsp/filecache/filecache.go +++ b/gopls/internal/lsp/filecache/filecache.go @@ -56,6 +56,9 @@ func Start() { // possibly by another process. // Get returns ErrNotFound if the value was not found. 
func Get(kind string, key [32]byte) ([]byte, error) { + iolimit <- struct{}{} // acquire a token + defer func() { <-iolimit }() // release a token + name, err := filename(kind, key) if err != nil { return nil, err From 19d700c676b1fcf636c4f692494fba7baaec3d10 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Tue, 16 May 2023 12:10:36 -0400 Subject: [PATCH 028/109] go/ssa/interp: use the actual GOOS and GOARCH to interpret tests This incidentally uncovered a bug in the `fitsInt` helper function, which was computing the wrong bounds on 32-bit platforms (as if int were 4 bits instead of 4 bytes), which is also fixed. Fixes golang/go#60226 (hopefully). Change-Id: I56afdd3063dce233696f1e7a873dac4ee9ca231f Reviewed-on: https://go-review.googlesource.com/c/tools/+/495255 Reviewed-by: Robert Findley Auto-Submit: Bryan Mills Run-TryBot: Bryan Mills TryBot-Result: Gopher Robot --- go/ssa/interp/interp_test.go | 124 ++++++++++++++--------------------- go/ssa/interp/ops.go | 4 +- 2 files changed, 51 insertions(+), 77 deletions(-) diff --git a/go/ssa/interp/interp_test.go b/go/ssa/interp/interp_test.go index 70ddceec720..817b4585ea3 100644 --- a/go/ssa/interp/interp_test.go +++ b/go/ssa/interp/interp_test.go @@ -23,9 +23,11 @@ import ( "log" "os" "path/filepath" + "runtime" "strings" "testing" "time" + "unsafe" "golang.org/x/tools/go/loader" "golang.org/x/tools/go/ssa" @@ -138,13 +140,7 @@ func init() { } } -// Specific GOARCH to use for a test case in go.tools/go/ssa/interp/testdata/. -// Defaults to amd64 otherwise. -var testdataArchs = map[string]string{ - "width32.go": "386", -} - -func run(t *testing.T, input string) bool { +func run(t *testing.T, input string) { // The recover2 test case is broken on Go 1.14+. See golang/go#34089. // TODO(matloob): Fix this. 
if filepath.Base(input) == "recover2.go" { @@ -157,16 +153,15 @@ func run(t *testing.T, input string) bool { ctx := build.Default // copy ctx.GOROOT = "testdata" // fake goroot - ctx.GOOS = "linux" - ctx.GOARCH = "amd64" - if arch, ok := testdataArchs[filepath.Base(input)]; ok { - ctx.GOARCH = arch + ctx.GOOS = runtime.GOOS + ctx.GOARCH = runtime.GOARCH + if filepath.Base(input) == "width32.go" && unsafe.Sizeof(int(0)) > 4 { + t.Skipf("skipping: width32.go checks behavior for a 32-bit int") } conf := loader.Config{Build: &ctx} if _, err := conf.FromArgs([]string{input}, true); err != nil { - t.Errorf("FromArgs(%s) failed: %s", input, err) - return false + t.Fatalf("FromArgs(%s) failed: %s", input, err) } conf.Import("runtime") @@ -188,8 +183,7 @@ func run(t *testing.T, input string) bool { iprog, err := conf.Load() if err != nil { - t.Errorf("conf.Load(%s) failed: %s", input, err) - return false + t.Fatalf("conf.Load(%s) failed: %s", input, err) } bmode := ssa.InstantiateGenerics | ssa.SanityCheckFunctions @@ -205,6 +199,9 @@ func run(t *testing.T, input string) bool { interp.CapturedOutput = new(bytes.Buffer) sizes := types.SizesFor("gc", ctx.GOARCH) + if sizes.Sizeof(types.Typ[types.Int]) < 4 { + panic("bogus SizesFor") + } hint = fmt.Sprintf("To trace execution, run:\n%% go build golang.org/x/tools/cmd/ssadump && ./ssadump -build=C -test -run --interp=T %s\n", input) var imode interp.Mode // default mode // imode |= interp.DisableRecover // enable for debugging @@ -223,17 +220,6 @@ func run(t *testing.T, input string) bool { if false { t.Log(input, time.Since(start)) // test profiling } - - return true -} - -func printFailures(failures []string) { - if failures != nil { - fmt.Println("The following tests failed:") - for _, f := range failures { - fmt.Printf("\t%s\n", f) - } - } } // TestTestdataFiles runs the interpreter on testdata/*.go. @@ -242,25 +228,20 @@ func TestTestdataFiles(t *testing.T) { if err != nil { log.Fatal(err) } - var failures []string for _, input := range testdataTests { - if !run(t, filepath.Join(cwd, "testdata", input)) { - failures = append(failures, input) - } + t.Run(input, func(t *testing.T) { + run(t, filepath.Join(cwd, "testdata", input)) + }) } - printFailures(failures) } // TestGorootTest runs the interpreter on $GOROOT/test/*.go. func TestGorootTest(t *testing.T) { - var failures []string - for _, input := range gorootTestTests { - if !run(t, filepath.Join(build.Default.GOROOT, "test", input)) { - failures = append(failures, input) - } + t.Run(input, func(t *testing.T) { + run(t, filepath.Join(build.Default.GOROOT, "test", input)) + }) } - printFailures(failures) } // TestTypeparamTest runs the interpreter on runnable examples @@ -274,19 +255,18 @@ func TestTypeparamTest(t *testing.T) { // Skip known failures for the given reason. // TODO(taking): Address these. 
skip := map[string]string{ - "chans.go": "interp tests do not support runtime.SetFinalizer", - "issue23536.go": "unknown reason", - "issue376214.go": "unknown issue with variadic cast on bytes", - "issue48042.go": "interp tests do not handle reflect.Value.SetInt", - "issue47716.go": "interp tests do not handle unsafe.Sizeof", - "issue50419.go": "interp tests do not handle dispatch to String() correctly", - "issue51733.go": "interp does not handle unsafe casts", - "ordered.go": "math.NaN() comparisons not being handled correctly", - "orderedmap.go": "interp tests do not support runtime.SetFinalizer", - "stringer.go": "unknown reason", - "issue48317.go": "interp tests do not support encoding/json", - "issue48318.go": "interp tests do not support encoding/json", - "issue58513.go": "interp tests do not support runtime.Caller", + "chans.go": "interp tests do not support runtime.SetFinalizer", + "issue23536.go": "unknown reason", + "issue48042.go": "interp tests do not handle reflect.Value.SetInt", + "issue47716.go": "interp tests do not handle unsafe.Sizeof", + "issue50419.go": "interp tests do not handle dispatch to String() correctly", + "issue51733.go": "interp does not handle unsafe casts", + "ordered.go": "math.NaN() comparisons not being handled correctly", + "orderedmap.go": "interp tests do not support runtime.SetFinalizer", + "stringer.go": "unknown reason", + "issue48317.go": "interp tests do not support encoding/json", + "issue48318.go": "interp tests do not support encoding/json", + "issue58513.go": "interp tests do not support runtime.Caller", } // Collect all of the .go files in dir that are runnable. dir := filepath.Join(build.Default.GOROOT, "test", "typeparam") @@ -294,34 +274,28 @@ func TestTypeparamTest(t *testing.T) { if err != nil { t.Fatal(err) } - var inputs []string for _, entry := range list { if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".go") { continue // Consider standalone go files. } - if reason := skip[entry.Name()]; reason != "" { - t.Logf("skipping %q due to %s.", entry.Name(), reason) - continue - } - input := filepath.Join(dir, entry.Name()) - src, err := os.ReadFile(input) - if err != nil { - t.Fatal(err) - } - // Only build test files that can be compiled, or compiled and run. - if bytes.HasPrefix(src, []byte("// run")) && !bytes.HasPrefix(src, []byte("// rundir")) { - inputs = append(inputs, input) - } else { - t.Logf("Not a `// run` file: %s", entry.Name()) - } - } - - var failures []string - for _, input := range inputs { - t.Log("running", input) - if !run(t, input) { - failures = append(failures, input) - } + t.Run(entry.Name(), func(t *testing.T) { + input := filepath.Join(dir, entry.Name()) + src, err := os.ReadFile(input) + if err != nil { + t.Fatal(err) + } + + // Only build test files that can be compiled, or compiled and run. 
+ if !bytes.HasPrefix(src, []byte("// run")) || bytes.HasPrefix(src, []byte("// rundir")) { + t.Logf("Not a `// run` file: %s", entry.Name()) + return + } + + if reason := skip[entry.Name()]; reason != "" { + t.Skipf("skipping: %s", reason) + } + + run(t, input) + }) } - printFailures(failures) } diff --git a/go/ssa/interp/ops.go b/go/ssa/interp/ops.go index 39830bc8fcb..a42d89b4f6b 100644 --- a/go/ssa/interp/ops.go +++ b/go/ssa/interp/ops.go @@ -92,8 +92,8 @@ func constValue(c *ssa.Const) value { func fitsInt(x int64, sizes types.Sizes) bool { intSize := sizes.Sizeof(types.Typ[types.Int]) if intSize < sizes.Sizeof(types.Typ[types.Int64]) { - maxInt := int64(1)<<(intSize-1) - 1 - minInt := -int64(1) << (intSize - 1) + maxInt := int64(1)<<((intSize*8)-1) - 1 + minInt := -int64(1) << ((intSize * 8) - 1) return minInt <= x && x <= maxInt } return true From 8c0fcd2c6351bcedf55657b8a2762e21638fb392 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 9 May 2023 22:02:12 -0400 Subject: [PATCH 029/109] gopls/internal/lsp/lru: extract LRU logic to a standalone package Extract a simple LRU package from the parseCache implementation (which itself was extracted from an initial implementation of the filecache). In a subsequent CL, this package will be used to reduce I/O in the filecache package. For golang/go#57987 Change-Id: I307f397b71654226d4e0e1c532a81cfde49af831 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494099 Reviewed-by: Alan Donovan Run-TryBot: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot --- gopls/internal/lsp/lru/lru.go | 153 +++++++++++++++++++++++ gopls/internal/lsp/lru/lru_fuzz_test.go | 41 +++++++ gopls/internal/lsp/lru/lru_test.go | 156 ++++++++++++++++++++++++ 3 files changed, 350 insertions(+) create mode 100644 gopls/internal/lsp/lru/lru.go create mode 100644 gopls/internal/lsp/lru/lru_fuzz_test.go create mode 100644 gopls/internal/lsp/lru/lru_test.go diff --git a/gopls/internal/lsp/lru/lru.go b/gopls/internal/lsp/lru/lru.go new file mode 100644 index 00000000000..5750f412bb0 --- /dev/null +++ b/gopls/internal/lsp/lru/lru.go @@ -0,0 +1,153 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The lru package implements a fixed-size in-memory LRU cache. +package lru + +import ( + "container/heap" + "fmt" + "sync" +) + +type any = interface{} // TODO: remove once gopls only builds at go1.18+ + +// A Cache is a fixed-size in-memory LRU cache. +type Cache struct { + capacity int + + mu sync.Mutex + used int // used capacity, in user-specified units + m map[any]*entry // k/v lookup + lru queue // min-atime priority queue of *entry + clock int64 // clock time, incremented whenever the cache is updated +} + +type entry struct { + key any + value any + size int // caller-specified size + atime int64 // last access / set time + index int // index of entry in the heap slice +} + +// New creates a new Cache with the given capacity, which must be positive. +// +// The cache capacity uses arbitrary units, which are specified during the Set +// operation. +func New(capacity int) *Cache { + if capacity == 0 { + panic("zero capacity") + } + + return &Cache{ + capacity: capacity, + m: make(map[any]*entry), + } +} + +// Get retrieves the value for the specified key, or nil if the key is not +// found. +// +// If the key is found, its access time is updated. 
+func (c *Cache) Get(key any) any { + c.mu.Lock() + defer c.mu.Unlock() + + c.clock++ // every access updates the clock + + if e, ok := c.m[key]; ok { // cache hit + e.atime = c.clock + heap.Fix(&c.lru, e.index) + return e.value + } + + return nil +} + +// Set stores a value for the specified key, using its given size to update the +// current cache size, evicting old entries as necessary to fit in the cache +// capacity. +// +// Size must be a non-negative value. If size is larger than the cache +// capacity, the value is not stored and the cache is not modified. +func (c *Cache) Set(key, value any, size int) { + if size < 0 { + panic(fmt.Sprintf("size must be non-negative, got %d", size)) + } + if size > c.capacity { + return // uncacheable + } + + c.mu.Lock() + defer c.mu.Unlock() + + c.clock++ + + // Remove the existing cache entry for key, if it exists. + e, ok := c.m[key] + if ok { + c.used -= e.size + heap.Remove(&c.lru, e.index) + delete(c.m, key) + } + + // Evict entries until the new value will fit. + newUsed := c.used + size + if newUsed < 0 { + return // integer overflow; return silently + } + c.used = newUsed + for c.used > c.capacity { + // evict oldest entry + e = heap.Pop(&c.lru).(*entry) + c.used -= e.size + delete(c.m, e.key) + } + + // Store the new value. + // Opt: e is evicted, so it can be reused to reduce allocation. + if e == nil { + e = new(entry) + } + e.key = key + e.value = value + e.size = size + e.atime = c.clock + c.m[e.key] = e + heap.Push(&c.lru, e) + + if len(c.m) != len(c.lru) { + panic("map and LRU are inconsistent") + } +} + +// -- priority queue boilerplate -- + +// queue is a min-atime priority queue of cache entries. +type queue []*entry + +func (q queue) Len() int { return len(q) } + +func (q queue) Less(i, j int) bool { return q[i].atime < q[j].atime } + +func (q queue) Swap(i, j int) { + q[i], q[j] = q[j], q[i] + q[i].index = i + q[j].index = j +} + +func (q *queue) Push(x any) { + e := x.(*entry) + e.index = len(*q) + *q = append(*q, e) +} + +func (q *queue) Pop() any { + last := len(*q) - 1 + e := (*q)[last] + (*q)[last] = nil // aid GC + *q = (*q)[:last] + return e +} diff --git a/gopls/internal/lsp/lru/lru_fuzz_test.go b/gopls/internal/lsp/lru/lru_fuzz_test.go new file mode 100644 index 00000000000..c5afceeaf3b --- /dev/null +++ b/gopls/internal/lsp/lru/lru_fuzz_test.go @@ -0,0 +1,41 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package lru_test + +import ( + "testing" + + "golang.org/x/tools/gopls/internal/lsp/lru" +) + +// Simple fuzzing test for consistency. +func FuzzCache(f *testing.F) { + type op struct { + set bool + key, value byte + } + f.Fuzz(func(t *testing.T, data []byte) { + var ops []op + for len(data) >= 3 { + ops = append(ops, op{data[0]%2 == 0, data[1], data[2]}) + data = data[3:] + } + cache := lru.New(100) + var reference [256]byte + for _, op := range ops { + if op.set { + reference[op.key] = op.value + cache.Set(op.key, op.value, 1) + } else { + if v := cache.Get(op.key); v != nil && v != reference[op.key] { + t.Fatalf("cache.Get(%d) = %d, want %d", op.key, v, reference[op.key]) + } + } + } + }) +} diff --git a/gopls/internal/lsp/lru/lru_test.go b/gopls/internal/lsp/lru/lru_test.go new file mode 100644 index 00000000000..165a64780cb --- /dev/null +++ b/gopls/internal/lsp/lru/lru_test.go @@ -0,0 +1,156 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lru_test + +import ( + "bytes" + cryptorand "crypto/rand" + "fmt" + "log" + mathrand "math/rand" + "strings" + "testing" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/gopls/internal/lsp/lru" +) + +type any = interface{} // TODO: remove once gopls only builds at go1.18+ + +func TestCache(t *testing.T) { + type get struct { + key string + want any + } + type set struct { + key, value string + } + + tests := []struct { + label string + steps []any + }{ + {"empty cache", []any{ + get{"a", nil}, + get{"b", nil}, + }}, + {"zero-length string", []any{ + set{"a", ""}, + get{"a", ""}, + }}, + {"under capacity", []any{ + set{"a", "123"}, + set{"b", "456"}, + get{"a", "123"}, + get{"b", "456"}, + }}, + {"over capacity", []any{ + set{"a", "123"}, + set{"b", "456"}, + set{"c", "78901"}, + get{"a", nil}, + get{"b", "456"}, + get{"c", "78901"}, + }}, + {"access ordering", []any{ + set{"a", "123"}, + set{"b", "456"}, + get{"a", "123"}, + set{"c", "78901"}, + get{"a", "123"}, + get{"b", nil}, + get{"c", "78901"}, + }}, + } + + for _, test := range tests { + t.Run(test.label, func(t *testing.T) { + c := lru.New(10) + for i, step := range test.steps { + switch step := step.(type) { + case get: + if got := c.Get(step.key); got != step.want { + t.Errorf("#%d: c.Get(%q) = %q, want %q", i, step.key, got, step.want) + } + case set: + c.Set(step.key, step.value, len(step.value)) + } + } + }) + } +} + +// TestConcurrency exercises concurrent access to the same entry. +// +// It is a copy of TestConcurrency from the filecache package. +func TestConcurrency(t *testing.T) { + key := uniqueKey() + const N = 100 // concurrency level + + // Construct N distinct values, each larger + // than a typical 4KB OS file buffer page. + var values [N][8192]byte + for i := range values { + if _, err := mathrand.Read(values[i][:]); err != nil { + t.Fatalf("rand: %v", err) + } + } + + cache := lru.New(100 * 1e6) // 100MB cache + + // get calls Get and verifies that the cache entry + // matches one of the values passed to Set. + get := func(mustBeFound bool) error { + got := cache.Get(key) + if got == nil { + if !mustBeFound { + return nil + } + return fmt.Errorf("Get did not return a value") + } + gotBytes := got.([]byte) + for _, want := range values { + if bytes.Equal(want[:], gotBytes) { + return nil // a match + } + } + return fmt.Errorf("Get returned a value that was never Set") + } + + // Perform N concurrent calls to Set and Get. + // All sets must succeed. + // All gets must return nothing, or one of the Set values; + // there is no third possibility. + var group errgroup.Group + for i := range values { + i := i + v := values[i][:] + group.Go(func() error { + cache.Set(key, v, len(v)) + return nil + }) + group.Go(func() error { return get(false) }) + } + if err := group.Wait(); err != nil { + if strings.Contains(err.Error(), "operation not supported") || + strings.Contains(err.Error(), "not implemented") { + t.Skipf("skipping: %v", err) + } + t.Fatal(err) + } + + // A final Get must report one of the values that was Set. + if err := get(true); err != nil { + t.Fatalf("final Get failed: %v", err) + } +} + +// uniqueKey returns a key that has never been used before. 
+func uniqueKey() (key [32]byte) { + if _, err := cryptorand.Read(key[:]); err != nil { + log.Fatalf("rand: %v", err) + } + return +} From e7048d5182ef6390ef0d19403cc3fbaaf5a77e00 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 9 May 2023 22:35:11 -0400 Subject: [PATCH 030/109] gopls/internal/lsp/filecache: front with a 100MB in-memory LRU cache Put a 100MB in-memory LRU cache in front of the filecache, to reduce I/O for repeated cache access, such as we observe with workspace diagnostics or implements queries. Updates golang/go#60089 For golang/go#57987 Change-Id: I01a1fcca7dcf26416d4cdb578a7a2674765c9f08 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494100 Run-TryBot: Robert Findley Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro --- gopls/internal/lsp/filecache/filecache.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go index 9433245288c..c4e2ce4a27f 100644 --- a/gopls/internal/lsp/filecache/filecache.go +++ b/gopls/internal/lsp/filecache/filecache.go @@ -40,6 +40,7 @@ import ( "time" "golang.org/x/tools/gopls/internal/bug" + "golang.org/x/tools/gopls/internal/lsp/lru" "golang.org/x/tools/internal/lockedfile" ) @@ -51,11 +52,28 @@ func Start() { go getCacheDir() } +// As an optimization, use a 100MB in-memory LRU cache in front of filecache +// operations. This reduces I/O for operations such as diagnostics or +// implementations that repeatedly access the same cache entries. +var memCache = lru.New(100 * 1e6) + +type memKey struct { + kind string + key [32]byte +} + // Get retrieves from the cache and returns a newly allocated // copy of the value most recently supplied to Set(kind, key), // possibly by another process. // Get returns ErrNotFound if the value was not found. func Get(kind string, key [32]byte) ([]byte, error) { + // First consult the read-through memory cache. + // Note that memory cache hits do not update the times + // used for LRU eviction of the file-based cache. + if value := memCache.Get(memKey{kind, key}); value != nil { + return value.([]byte), nil + } + iolimit <- struct{}{} // acquire a token defer func() { <-iolimit }() // release a token @@ -112,6 +130,7 @@ func Get(kind string, key [32]byte) ([]byte, error) { return nil, fmt.Errorf("failed to update access time: %w", err) } + memCache.Set(memKey{kind, key}, value, len(value)) return value, nil } @@ -121,6 +140,8 @@ var ErrNotFound = fmt.Errorf("not found") // Set updates the value in the cache. func Set(kind string, key [32]byte, value []byte) error { + memCache.Set(memKey{kind, key}, value, len(value)) + iolimit <- struct{}{} // acquire a token defer func() { <-iolimit }() // release a token From 3b25dbdddd467eeff02f19712879235994fee2d8 Mon Sep 17 00:00:00 2001 From: Tim King Date: Fri, 12 May 2023 12:01:10 -0700 Subject: [PATCH 031/109] go/ssa: use core types for array length Updates len, cap and range built-ins to use the array length based on the core types of the array or pointer to array. These are not constant in the language but can be treated as constants during translation. 
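For illustration only, a minimal sketch (the function names are
illustrative and this code is not part of the change) mirroring the
new builder_generic_test cases: in each function below the argument's
core type is an array or pointer to array, so the builder can now
emit len as a constant even though T itself is a type parameter.

    package p

    // The core type of T is [4]int, so len(f()) is built as the
    // constant 4, with f() still evaluated for its effects only.
    func lenArray[T ~[4]int](f func() T) int {
        return len(f())
    }

    // The core type of T is *[4]int; len of a pointer to array is
    // likewise built as the constant 4.
    func lenArrayPtr[T *[4]int](f func() T) int {
        return len(f())
    }
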
Change-Id: Iec9ca61edfd51a503a4fe99e57562bff1eb56309 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494977 Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Tim King --- go/ssa/builder.go | 13 +++---- go/ssa/builder_generic_test.go | 64 ++++++++++++++++++++++++++++++++-- 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/go/ssa/builder.go b/go/ssa/builder.go index 3ad4f391bdc..772edd61692 100644 --- a/go/ssa/builder.go +++ b/go/ssa/builder.go @@ -373,10 +373,8 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ // We must still evaluate the value, though. (If it // was side-effect free, the whole call would have // been constant-folded.) - // - // Type parameters are always non-constant so use Underlying. - t, _ := deptr(fn.typeOf(args[0])) - if at, ok := t.Underlying().(*types.Array); ok { + t, _ := deref(fn.typeOf(args[0])) + if at, ok := typeparams.CoreType(t).(*types.Array); ok { b.expr(fn, args[0]) // for effects only return intConst(at.Len()) } @@ -1884,17 +1882,14 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P // Determine number of iterations. var length Value - dt, _ := deptr(x.Type()) - if arr, ok := dt.Underlying().(*types.Array); ok { + dt, _ := deref(x.Type()) + if arr, ok := typeparams.CoreType(dt).(*types.Array); ok { // For array or *array, the number of iterations is // known statically thanks to the type. We avoid a // data dependence upon x, permitting later dead-code // elimination if x is pure, static unrolling, etc. // Ranging over a nil *array may have >0 iterations. // We still generate code for x, in case it has effects. - // - // TypeParams do not have constant length. Use underlying instead of core type. - // TODO: check if needed. length = intConst(arr.Len()) } else { // length = len(x). diff --git a/go/ssa/builder_generic_test.go b/go/ssa/builder_generic_test.go index 4c3ac171a3b..7187c3521a2 100644 --- a/go/ssa/builder_generic_test.go +++ b/go/ssa/builder_generic_test.go @@ -436,6 +436,57 @@ func TestGenericBodies(t *testing.T) { c := *(any(v).(*C)); print(c) /*@ types("p23.C")*/ } `, + ` + package p24 + + func a[T any](f func() [4]T) { + x := len(f()) + print(x) /*@ types("int")*/ + } + + func b[T [4]any](f func() T) { + x := len(f()) + print(x) /*@ types("int")*/ + } + + func c[T any](f func() *[4]T) { + x := len(f()) + print(x) /*@ types("int")*/ + } + + func d[T *[4]any](f func() T) { + x := len(f()) + print(x) /*@ types("int")*/ + } + `, + ` + package p25 + + func a[T any]() { + var f func() [4]T + for i, v := range f() { + print(i, v) /*@ types("int", "T")*/ + } + } + + func b[T [4]any](f func() T) { + for i, v := range f() { + print(i, v) /*@ types("int", "any")*/ + } + } + + func c[T any](f func() *[4]T) { + for i, v := range f() { + print(i, v) /*@ types("int", "T")*/ + } + } + + func d[T *[4]any](f func() T) { + for i, v := range f() { + print(i, v) /*@ types("int", "any")*/ + } + } + `, } { contents := contents pkgname := packageName(t, contents) @@ -465,7 +516,7 @@ func TestGenericBodies(t *testing.T) { p.Build() // Collect calls to the builtin print function. 
- probes := make(map[*ssa.CallCommon]bool) + probes := make(map[*ssa.CallCommon]*ssa.Function) for _, mem := range p.Members { if fn, ok := mem.(*ssa.Function); ok { for _, bb := range fn.Blocks { @@ -473,7 +524,7 @@ func TestGenericBodies(t *testing.T) { if i, ok := i.(ssa.CallInstruction); ok { call := i.Common() if b, ok := call.Value.(*ssa.Builtin); ok && b.Name() == "print" { - probes[i.Common()] = true + probes[i.Common()] = fn } } } @@ -517,6 +568,7 @@ func TestGenericBodies(t *testing.T) { } if got, want := fmt.Sprint(args), fmt.Sprint(note.Args); got != want { t.Errorf("Arguments to print() were expected to be %q. got %q", want, got) + logFunction(t, probes[call]) } } }) @@ -568,6 +620,14 @@ func TestInstructionString(t *testing.T) { _ = x.foo _ = x.foo() } + + //@ instrs("f4", "*ssa.BinOp", "t1 + 1:int", "t2 < 4:int") + //@ instrs("f4", "*ssa.Call", "f()", "print(t2, t4)") + func f4[T [4]string](f func() T) { + for i, v := range f() { + print(i, v) + } + } ` // Parse From f4e8a711ac3fe3aebe2027ccf888b84eed960601 Mon Sep 17 00:00:00 2001 From: Tim King Date: Fri, 12 May 2023 13:37:37 -0700 Subject: [PATCH 032/109] go/ssa: use core type for composite literal addresses Dereferences using the core type during compLit and when creating addresses for composite literals. Also adds a new utility fieldOf for selecting a field from a type whose core type is a struct. Change-Id: I2fd0a1caf99819d0b9be5f3ba79a00f8053565e3 Reviewed-on: https://go-review.googlesource.com/c/tools/+/494978 TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Tim King Reviewed-by: Alan Donovan --- go/ssa/builder.go | 55 ++++++++---------------------- go/ssa/builder_generic_test.go | 62 ++++++++++++++++++++++++++++++++++ go/ssa/emit.go | 20 ++++------- go/ssa/print.go | 11 +++--- go/ssa/util.go | 11 ++++++ 5 files changed, 97 insertions(+), 62 deletions(-) diff --git a/go/ssa/builder.go b/go/ssa/builder.go index 772edd61692..8931fb46fc7 100644 --- a/go/ssa/builder.go +++ b/go/ssa/builder.go @@ -429,12 +429,12 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { return &address{addr: v, pos: e.Pos(), expr: e} case *ast.CompositeLit: - t, _ := deptr(fn.typeOf(e)) + typ, _ := deref(fn.typeOf(e)) var v *Alloc if escaping { - v = emitNew(fn, t, e.Lbrace) + v = emitNew(fn, typ, e.Lbrace) } else { - v = fn.addLocal(t, e.Lbrace) + v = fn.addLocal(typ, e.Lbrace) } v.Comment = "complit" var sb storebuf @@ -457,8 +457,7 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { wantAddr := true v := b.receiver(fn, e.X, wantAddr, escaping, sel) index := sel.index[len(sel.index)-1] - dt, _ := deptr(v.Type()) - fld := typeparams.CoreType(dt).(*types.Struct).Field(index) + fld := fieldOf(mustDeref(v.Type()), index) // v is an addr. // Due to the two phases of resolving AssignStmt, a panic from x.f = p() // when x is nil is required to come after the side-effects of @@ -553,7 +552,7 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * // so if the type of the location is a pointer, // an &-operation is implied. if _, ok := loc.(blank); !ok { // avoid calling blank.typ() - if _, ok := deptr(loc.typ()); ok { + if _, ok := deref(loc.typ()); ok { ptr := b.addr(fn, e, true).address(fn) // copy address if sb != nil { @@ -583,7 +582,7 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * // Subtle: emit debug ref for aggregate types only; // slice and map are handled by store ops in compLit. 
- switch loc.typ().Underlying().(type) { // TODO(taking): check if Underlying() appropriate. + switch typeparams.CoreType(loc.typ()).(type) { case *types.Struct, *types.Array: emitDebugRef(fn, e, addr, true) } @@ -1253,39 +1252,13 @@ func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { // literal has type *T behaves like &T{}. // In that case, addr must hold a T, not a *T. func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { - typ, _ := deptr(fn.typeOf(e)) // type with name [may be type param] - t, _ := deptr(typeparams.CoreType(typ)) // core type for comp lit case - t = t.Underlying() - - // Computing typ and t is subtle as these handle pointer types. - // For example, &T{...} is valid even for maps and slices. - // Also typ should refer to T (not *T) while t should be the core type of T. - // - // To show the ordering to take into account, consider the composite literal - // expressions `&T{f: 1}` and `{f: 1}` within the expression `[]S{{f: 1}}` here: - // type N struct{f int} - // func _[T N, S *N]() { - // _ = &T{f: 1} - // _ = []S{{f: 1}} - // } - // For `&T{f: 1}`, we compute `typ` and `t` as: - // typeOf(&T{f: 1}) == *T - // deref(*T) == T (typ) - // CoreType(T) == N - // deref(N) == N - // N.Underlying() == struct{f int} (t) - // For `{f: 1}` in `[]S{{f: 1}}`, we compute `typ` and `t` as: - // typeOf({f: 1}) == S - // deref(S) == S (typ) - // CoreType(S) == *N - // deref(*N) == N - // N.Underlying() == struct{f int} (t) - switch t := t.(type) { + typ, _ := deref(fn.typeOf(e)) // type with name [may be type param] + switch t := typeparams.CoreType(typ).(type) { case *types.Struct: if !isZero && len(e.Elts) != t.NumFields() { // memclear - dt, _ := deptr(addr.Type()) - sb.store(&address{addr, e.Lbrace, nil}, zeroConst(dt)) + zt, _ := deref(addr.Type()) + sb.store(&address{addr, e.Lbrace, nil}, zeroConst(zt)) isZero = true } for i, e := range e.Elts { @@ -1329,8 +1302,8 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero if !isZero && int64(len(e.Elts)) != at.Len() { // memclear - dt, _ := deptr(array.Type()) - sb.store(&address{array, e.Lbrace, nil}, zeroConst(dt)) + zt, _ := deref(array.Type()) + sb.store(&address{array, e.Lbrace, nil}, zeroConst(zt)) } } @@ -1385,7 +1358,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero // map[*struct{}]bool{&struct{}{}: true} wantAddr := false if _, ok := unparen(e.Key).(*ast.CompositeLit); ok { - _, wantAddr = t.Key().Underlying().(*types.Pointer) + _, wantAddr = deref(t.Key()) } var key Value @@ -1416,7 +1389,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, m) default: - panic("unexpected CompositeLit type: " + t.String()) + panic("unexpected CompositeLit type: " + typ.String()) } } diff --git a/go/ssa/builder_generic_test.go b/go/ssa/builder_generic_test.go index 7187c3521a2..77de3268bc1 100644 --- a/go/ssa/builder_generic_test.go +++ b/go/ssa/builder_generic_test.go @@ -607,6 +607,13 @@ func TestInstructionString(t *testing.T) { return u } + //@ instrs("f1b", "*ssa.Alloc", "new T (complit)") + //@ instrs("f1b", "*ssa.FieldAddr", "&t0.x [#0]") + func f1b[T ~struct{ x string }]() *T { + u := &T{"lorem"} + return u + } + //@ instrs("f2", "*ssa.TypeAssert", "typeassert t0.(interface{})") //@ instrs("f2", "*ssa.Call", "invoke x.foo()") func f2[T interface{ foo() string }](x T) { @@ -628,6 +635,61 @@ func TestInstructionString(t 
*testing.T) { print(i, v) } } + + //@ instrs("f5", "*ssa.Call", "nil:func()()") + func f5() { + var f func() + f() + } + + type S struct{ f int } + + //@ instrs("f6", "*ssa.Alloc", "new [1]P (slicelit)", "new S (complit)") + //@ instrs("f6", "*ssa.IndexAddr", "&t0[0:int]") + //@ instrs("f6", "*ssa.FieldAddr", "&t2.f [#0]") + func f6[P *S]() []P { return []P{{f: 1}} } + + //@ instrs("f7", "*ssa.Alloc", "local S (complit)") + //@ instrs("f7", "*ssa.FieldAddr", "&t0.f [#0]") + func f7[T any, S struct{f T}](x T) S { return S{f: x} } + + //@ instrs("f8", "*ssa.Alloc", "new [1]P (slicelit)", "new struct{f T} (complit)") + //@ instrs("f8", "*ssa.IndexAddr", "&t0[0:int]") + //@ instrs("f8", "*ssa.FieldAddr", "&t2.f [#0]") + func f8[T any, P *struct{f T}](x T) []P { return []P{{f: x}} } + + //@ instrs("f9", "*ssa.Alloc", "new [1]PS (slicelit)", "new S (complit)") + //@ instrs("f9", "*ssa.IndexAddr", "&t0[0:int]") + //@ instrs("f9", "*ssa.FieldAddr", "&t2.f [#0]") + func f9[T any, S struct{f T}, PS *S](x T) { + _ = []PS{{f: x}} + } + + //@ instrs("f10", "*ssa.FieldAddr", "&t0.x [#0]") + //@ instrs("f10", "*ssa.Store", "*t0 = *new(T):T", "*t1 = 4:int") + func f10[T ~struct{ x, y int }]() T { + var u T + u = T{x: 4} + return u + } + + //@ instrs("f11", "*ssa.FieldAddr", "&t1.y [#1]") + //@ instrs("f11", "*ssa.Store", "*t1 = *new(T):T", "*t2 = 5:int") + func f11[T ~struct{ x, y int }, PT *T]() PT { + var u PT = new(T) + *u = T{y: 5} + return u + } + + //@ instrs("f12", "*ssa.Alloc", "new struct{f T} (complit)") + //@ instrs("f12", "*ssa.MakeMap", "make map[P]bool 1:int") + func f12[T any, P *struct{f T}](x T) map[P]bool { return map[P]bool{{}: true} } + + //@ instrs("f13", "&v[0:int]") + //@ instrs("f13", "*ssa.Store", "*t0 = 7:int", "*v = *new(A):A") + func f13[A [3]int, PA *A](v PA) { + *v = A{7} + } ` // Parse diff --git a/go/ssa/emit.go b/go/ssa/emit.go index d402e676b20..80e30b6c215 100644 --- a/go/ssa/emit.go +++ b/go/ssa/emit.go @@ -11,8 +11,6 @@ import ( "go/ast" "go/token" "go/types" - - "golang.org/x/tools/internal/typeparams" ) // emitNew emits to f a new (heap Alloc) instruction allocating an @@ -478,9 +476,8 @@ func emitTailCall(f *Function, call *Call) { // value of a field. func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) Value { for _, index := range indices { - st, vptr := deptr(v.Type()) - fld := typeparams.CoreType(st).(*types.Struct).Field(index) - if vptr { + if st, vptr := deptr(v.Type()); vptr { + fld := fieldOf(st, index) instr := &FieldAddr{ X: v, Field: index, @@ -493,6 +490,7 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) v = emitLoad(f, v) } } else { + fld := fieldOf(v.Type(), index) instr := &Field{ X: v, Field: index, @@ -512,15 +510,8 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) // field's value. // Ident id is used for position and debug info. func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { - // TODO(taking): Cover the following cases of interest - // func f[T any, S struct{f T}, P *struct{f T}, PS *S](x T) { - // _ := S{f: x} - // _ := P{f: x} - // _ := PS{f: x} - // } - st, vptr := deptr(v.Type()) - fld := typeparams.CoreType(st).(*types.Struct).Field(index) - if vptr { + if st, vptr := deptr(v.Type()); vptr { + fld := fieldOf(st, index) instr := &FieldAddr{ X: v, Field: index, @@ -533,6 +524,7 @@ func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast. 
v = emitLoad(f, v) } } else { + fld := fieldOf(v.Type(), index) instr := &Field{ X: v, Field: index, diff --git a/go/ssa/print.go b/go/ssa/print.go index e47e51637d4..7f34a7b58b7 100644 --- a/go/ssa/print.go +++ b/go/ssa/print.go @@ -259,22 +259,19 @@ func (v *MakeChan) String() string { } func (v *FieldAddr) String() string { - dt, _ := deptr(v.X.Type()) - st := typeparams.CoreType(dt).(*types.Struct) // Be robust against a bad index. name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() + if fld := fieldOf(mustDeref(v.X.Type()), v.Field); fld != nil { + name = fld.Name() } return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field) } func (v *Field) String() string { - st := typeparams.CoreType(v.X.Type()).(*types.Struct) // Be robust against a bad index. name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() + if fld := fieldOf(v.X.Type(), v.Field); fld != nil { + name = fld.Name() } return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field) } diff --git a/go/ssa/util.go b/go/ssa/util.go index 53a74877bcb..7735dd8e98d 100644 --- a/go/ssa/util.go +++ b/go/ssa/util.go @@ -128,6 +128,17 @@ func recvType(obj *types.Func) types.Type { return obj.Type().(*types.Signature).Recv().Type() } +// fieldOf returns the index'th field of the (core type of) a struct type; +// otherwise returns nil. +func fieldOf(typ types.Type, index int) *types.Var { + if st, ok := typeparams.CoreType(typ).(*types.Struct); ok { + if 0 <= index && index < st.NumFields() { + return st.Field(index) + } + } + return nil +} + // isUntyped returns true for types that are untyped. func isUntyped(typ types.Type) bool { b, ok := typ.(*types.Basic) From e5c8d4db75e9bf4148e4166bb51b25b76b4b1df6 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 15 May 2023 19:50:02 -0400 Subject: [PATCH 033/109] gopls/internal/lsp/cache: unexport ValidBuildConfiguration In general, the source package should not need to know about build configurations. Remove this unused interface method, and unexport. Change-Id: I91ba24e5bc2d172046ccaf3c46d2eb3f70e7bc42 Reviewed-on: https://go-review.googlesource.com/c/tools/+/495057 Run-TryBot: Robert Findley TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Alan Donovan --- gopls/internal/lsp/cache/load.go | 4 ++-- gopls/internal/lsp/cache/snapshot.go | 10 +++++----- gopls/internal/lsp/cache/view.go | 2 +- gopls/internal/lsp/source/view.go | 5 ----- 4 files changed, 8 insertions(+), 13 deletions(-) diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go index 521dc1ee63e..111b0743cb9 100644 --- a/gopls/internal/lsp/cache/load.go +++ b/gopls/internal/lsp/cache/load.go @@ -90,7 +90,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadSc case viewLoadScope: // If we are outside of GOPATH, a module, or some other known // build system, don't load subdirectories. - if !s.ValidBuildConfiguration() { + if !s.validBuildConfiguration() { query = append(query, "./") } else { query = append(query, "./...") @@ -359,7 +359,7 @@ func (s *snapshot) workspaceLayoutError(ctx context.Context) (error, []*source.D // If the snapshot does not have a valid build configuration, it may be // that the user has opened a directory that contains multiple modules. // Check for that an warn about it. - if !s.ValidBuildConfiguration() { + if !s.validBuildConfiguration() { var msg string if s.view.goversion >= 18 { msg = `gopls was not able to find modules in your workspace. 
diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index 45a32a142ff..7988a72b738 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -321,7 +321,7 @@ func (s *snapshot) Templates() map[span.URI]source.FileHandle { return tmpls } -func (s *snapshot) ValidBuildConfiguration() bool { +func (s *snapshot) validBuildConfiguration() bool { // Since we only really understand the `go` command, if the user has a // different GOPACKAGESDRIVER, assume that their configuration is valid. if s.view.hasGopackagesDriver { @@ -379,7 +379,7 @@ func (s *snapshot) workspaceMode() workspaceMode { // If the view has an invalid configuration, don't build the workspace // module. - validBuildConfiguration := s.ValidBuildConfiguration() + validBuildConfiguration := s.validBuildConfiguration() if !validBuildConfiguration { return mode } @@ -1448,8 +1448,8 @@ const adHocPackagesWarning = `You are outside of a module and outside of $GOPATH If you are using modules, please open your editor to a directory in your module. If you believe this warning is incorrect, please file an issue: https://github.com/golang/go/issues/new.` -func shouldShowAdHocPackagesWarning(snapshot source.Snapshot, active []*source.Metadata) string { - if !snapshot.ValidBuildConfiguration() { +func shouldShowAdHocPackagesWarning(snapshot *snapshot, active []*source.Metadata) string { + if !snapshot.validBuildConfiguration() { for _, m := range active { // A blank entry in DepsByImpPath // indicates a missing dependency. @@ -1559,7 +1559,7 @@ func (s *snapshot) reloadWorkspace(ctx context.Context) error { // If the view's build configuration is invalid, we cannot reload by // package path. Just reload the directory instead. - if !s.ValidBuildConfiguration() { + if !s.validBuildConfiguration() { scopes = []loadScope{viewLoadScope("LOAD_INVALID_VIEW")} } diff --git a/gopls/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go index db2c1dc34f0..be2879d277e 100644 --- a/gopls/internal/lsp/cache/view.go +++ b/gopls/internal/lsp/cache/view.go @@ -424,7 +424,7 @@ func viewEnv(v *View) string { v.folder.Filename(), v.workingDir().Filename(), strings.TrimRight(v.workspaceInformation.goversionOutput, "\n"), - v.snapshot.ValidBuildConfiguration(), + v.snapshot.validBuildConfiguration(), buildFlags, v.goEnv, ) diff --git a/gopls/internal/lsp/source/view.go b/gopls/internal/lsp/source/view.go index e77f9d2dec8..6dd3811a0a5 100644 --- a/gopls/internal/lsp/source/view.go +++ b/gopls/internal/lsp/source/view.go @@ -63,11 +63,6 @@ type Snapshot interface { // on behalf of this snapshot. BackgroundContext() context.Context - // ValidBuildConfiguration returns true if there is some error in the - // user's workspace. In particular, if they are both outside of a module - // and their GOPATH. - ValidBuildConfiguration() bool - // A Snapshot is a caching implementation of FileSource whose // ReadFile method returns consistent information about the existence // and content of each file throughout its lifetime. From a0593829f125fd78c05e723c8499030d3de2aa6c Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 15 May 2023 19:54:19 -0400 Subject: [PATCH 034/109] gopls/internal/lsp/cache: fail workspace load on context cancellation If the context was cancelled early during initialization, it was possible that all module load scopes get skipped, because ParseMod returns the context error. As a result, the subsequent load would succeed trivially, even though the context was cancelled. 
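
To make the failure concrete, the old code in loadWorkspace had roughly
this shape (an illustrative sketch of the pattern removed below, not
new code):

	parsed, err := s.ParseMod(ctx, fh)
	if err != nil {
		if ctx.Err() == nil {
			addError(modURI, err)
		}
		continue // on cancellation: no error recorded, scope silently dropped
	}

so a cancelled context dropped every module load scope without
recording an error, and the subsequent load had nothing to check.
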
After analyzing the reinitialization codepath, I find this race very
plausible to encounter, and it explains the flakiness of
TestReinitializeRepeatedly -- I have found nothing else that would
explain the failure mode observed in logs, namely that the reload
bypasses initialization.

Fix this by returning when context errors are encountered.

Fixes golang/go#57780

Change-Id: I3fb971503f280131c59146bc586da45dd2ed1126
Reviewed-on: https://go-review.googlesource.com/c/tools/+/495058
gopls-CI: kokoro
TryBot-Result: Gopher Robot
Reviewed-by: Alan Donovan
Run-TryBot: Robert Findley
---
 gopls/internal/lsp/cache/view.go | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/gopls/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go
index be2879d277e..d9991704a73 100644
--- a/gopls/internal/lsp/cache/view.go
+++ b/gopls/internal/lsp/cache/view.go
@@ -752,16 +752,18 @@ func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) (loadEr
 		// errors.
 		fh, err := s.ReadFile(ctx, modURI)
 		if err != nil {
-			if ctx.Err() == nil {
-				addError(modURI, err)
+			if ctx.Err() != nil {
+				return ctx.Err()
 			}
+			addError(modURI, err)
 			continue
 		}
 		parsed, err := s.ParseMod(ctx, fh)
 		if err != nil {
-			if ctx.Err() == nil {
-				addError(modURI, err)
+			if ctx.Err() != nil {
+				return ctx.Err()
 			}
+			addError(modURI, err)
 			continue
 		}
 		if parsed.File == nil || parsed.File.Module == nil {

From 522243a7165d913f0905fc907e8d943d288e3fde Mon Sep 17 00:00:00 2001
From: "Bryan C. Mills" 
Date: Tue, 16 May 2023 13:18:26 -0400
Subject: [PATCH 035/109] go/ssa/interp: avoid hard-coding GOOS and GOARCH

GOOS and GOARCH are not necessarily present in the environment at all,
so overriding them in interp doesn't have a clear purpose, especially
given that CL 495255 no longer overrides them in the build context.

Unfortunately, the fake (reduced) GOROOT/src used for the test also
hard-coded the runtime constants for GOOS and GOARCH. Since in general
the testdata directory is not writable (it may be in the module cache),
we need to copy the fake GOROOT into a temporary directory in order to
modify it.

This change also deletes an unused function that I noticed while
investigating.

Fixes golang/go#60226 (maybe).

Change-Id: I409292af76d411efb8a3b4ca6d8d929b53325610
Reviewed-on: https://go-review.googlesource.com/c/tools/+/495258
TryBot-Result: Gopher Robot
Reviewed-by: Robert Findley
Auto-Submit: Bryan Mills
gopls-CI: kokoro
Run-TryBot: Bryan Mills
---
 go/ssa/interp/external.go                     |  4 --
 go/ssa/interp/interp.go                       |  9 ---
 go/ssa/interp/interp_test.go                  | 66 +++++++++++++++++--
 go/ssa/interp/testdata/src/runtime/runtime.go |  3 -
 4 files changed, 60 insertions(+), 22 deletions(-)

diff --git a/go/ssa/interp/external.go b/go/ssa/interp/external.go
index 51b3be0bda3..7a79924e3fb 100644
--- a/go/ssa/interp/external.go
+++ b/go/ssa/interp/external.go
@@ -312,10 +312,6 @@ func ext۰os۰Getenv(fr *frame, args []value) value {
 	switch name {
 	case "GOSSAINTERP":
 		return "1"
-	case "GOARCH":
-		return "amd64"
-	case "GOOS":
-		return "linux"
 	}
 	return os.Getenv(name)
 }
diff --git a/go/ssa/interp/interp.go b/go/ssa/interp/interp.go
index 58cac464241..79363f57362 100644
--- a/go/ssa/interp/interp.go
+++ b/go/ssa/interp/interp.go
@@ -635,15 +635,6 @@ func doRecover(caller *frame) value {
 	return iface{}
 }
 
-// setGlobal sets the value of a system-initialized global variable.
-func setGlobal(i *interpreter, pkg *ssa.Package, name string, v value) { - if g, ok := i.globals[pkg.Var(name)]; ok { - *g = v - return - } - panic("no global variable: " + pkg.Pkg.Path() + "." + name) -} - // Interpret interprets the Go program whose main package is mainpkg. // mode specifies various interpreter options. filename and args are // the initial values of os.Args for the target program. sizes is the diff --git a/go/ssa/interp/interp_test.go b/go/ssa/interp/interp_test.go index 817b4585ea3..64ede78ae27 100644 --- a/go/ssa/interp/interp_test.go +++ b/go/ssa/interp/interp_test.go @@ -138,9 +138,14 @@ func init() { testdataTests = append(testdataTests, "typeassert.go") testdataTests = append(testdataTests, "zeros.go") } + + // GOROOT/test used to assume that GOOS and GOARCH were explicitly set in the + // environment, so do that here for TestGorootTest. + os.Setenv("GOOS", runtime.GOOS) + os.Setenv("GOARCH", runtime.GOARCH) } -func run(t *testing.T, input string) { +func run(t *testing.T, input string, goroot string) { // The recover2 test case is broken on Go 1.14+. See golang/go#34089. // TODO(matloob): Fix this. if filepath.Base(input) == "recover2.go" { @@ -151,8 +156,8 @@ func run(t *testing.T, input string) { start := time.Now() - ctx := build.Default // copy - ctx.GOROOT = "testdata" // fake goroot + ctx := build.Default // copy + ctx.GOROOT = goroot ctx.GOOS = runtime.GOOS ctx.GOARCH = runtime.GOARCH if filepath.Base(input) == "width32.go" && unsafe.Sizeof(int(0)) > 4 { @@ -222,24 +227,72 @@ func run(t *testing.T, input string) { } } +// makeGoroot copies testdata/src into the "src" directory of a temporary +// location to mimic GOROOT/src, and adds a file "runtime/consts.go" containing +// declarations for GOOS and GOARCH that match the GOOS and GOARCH of this test. +// +// It returns the directory that should be used for GOROOT. +func makeGoroot(t *testing.T) string { + goroot := t.TempDir() + src := filepath.Join(goroot, "src") + + err := filepath.Walk("testdata/src", func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + rel, err := filepath.Rel("testdata/src", path) + if err != nil { + return err + } + targ := filepath.Join(src, rel) + + if info.IsDir() { + return os.Mkdir(targ, info.Mode().Perm()|0700) + } + + b, err := os.ReadFile(path) + if err != nil { + return err + } + return os.WriteFile(targ, b, info.Mode().Perm()) + }) + if err != nil { + t.Fatal(err) + } + + constsGo := fmt.Sprintf(`package runtime +const GOOS = %q +const GOARCH = %q +`, runtime.GOOS, runtime.GOARCH) + err = os.WriteFile(filepath.Join(src, "runtime/consts.go"), []byte(constsGo), 0644) + if err != nil { + t.Fatal(err) + } + + return goroot +} + // TestTestdataFiles runs the interpreter on testdata/*.go. func TestTestdataFiles(t *testing.T) { + goroot := makeGoroot(t) cwd, err := os.Getwd() if err != nil { log.Fatal(err) } for _, input := range testdataTests { t.Run(input, func(t *testing.T) { - run(t, filepath.Join(cwd, "testdata", input)) + run(t, filepath.Join(cwd, "testdata", input), goroot) }) } } // TestGorootTest runs the interpreter on $GOROOT/test/*.go. 
func TestGorootTest(t *testing.T) { + goroot := makeGoroot(t) for _, input := range gorootTestTests { t.Run(input, func(t *testing.T) { - run(t, filepath.Join(build.Default.GOROOT, "test", input)) + run(t, filepath.Join(build.Default.GOROOT, "test", input), goroot) }) } } @@ -251,6 +304,7 @@ func TestTypeparamTest(t *testing.T) { if !typeparams.Enabled { return } + goroot := makeGoroot(t) // Skip known failures for the given reason. // TODO(taking): Address these. @@ -295,7 +349,7 @@ func TestTypeparamTest(t *testing.T) { t.Skipf("skipping: %s", reason) } - run(t, input) + run(t, input, goroot) }) } } diff --git a/go/ssa/interp/testdata/src/runtime/runtime.go b/go/ssa/interp/testdata/src/runtime/runtime.go index c60c7fc29a3..f94684befd8 100644 --- a/go/ssa/interp/testdata/src/runtime/runtime.go +++ b/go/ssa/interp/testdata/src/runtime/runtime.go @@ -16,7 +16,4 @@ type Error interface { RuntimeError() } -const GOOS = "linux" -const GOARCH = "amd64" - func GC() From 743372f585829bd9421079498f4de2f77f50eb5a Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 16 May 2023 15:32:00 -0400 Subject: [PATCH 036/109] gopls/internal/lsp/debug: limit to 100 recent trace entries Before this CL, we would keep traces for up to a minute, even if we exceeded the maximum number of recent traces. This could lead to an unusable trace debug page, as traces can be very chatty during large operations. Change the condition to enforce a strict limit on the number of traces to keep, and remove age-based eviction. Change-Id: Ie9b44e2c5ef236c3e23e3eb21b7eb55da74295da Reviewed-on: https://go-review.googlesource.com/c/tools/+/495259 Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Robert Findley Auto-Submit: Robert Findley --- gopls/internal/lsp/debug/trace.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/gopls/internal/lsp/debug/trace.go b/gopls/internal/lsp/debug/trace.go index 48bed9d3b0f..31c5a5376ac 100644 --- a/gopls/internal/lsp/debug/trace.go +++ b/gopls/internal/lsp/debug/trace.go @@ -259,13 +259,10 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) // addRecentLocked appends a start or end event to the "recent" log, // evicting an old entry if necessary. func (t *traces) addRecentLocked(span *traceSpan, start bool) { - const ( - maxRecent = 100 // number of log entries before age-based eviction - maxAge = 1 * time.Minute - ) t.recent = append(t.recent, spanStartEnd{Start: start, Span: span}) - for len(t.recent) > maxRecent && t.recent[0].Time().Before(time.Now().Add(-maxAge)) { + const maxRecent = 100 // number of log entries before eviction + for len(t.recent) > maxRecent { t.recent[0] = spanStartEnd{} // aid GC t.recent = t.recent[1:] t.recentEvictions++ From 651d951bb69be4d2fd9f83f5b329c1a4f24742d2 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 16 May 2023 14:28:17 -0700 Subject: [PATCH 037/109] go/ssa: fix typo in package docs In go.dev/cl/457436, a new MultiConvert instruction was added, but it was documented in the package docs as GenericConvert (the name originally used in the CL). 
Change-Id: I576184b599431163818b5cb6acc55b2773ac9997
Reviewed-on: https://go-review.googlesource.com/c/tools/+/495263
Run-TryBot: Matthew Dempsky 
Auto-Submit: Matthew Dempsky 
TryBot-Result: Gopher Robot 
Reviewed-by: Alan Donovan 
gopls-CI: kokoro
---
 go/ssa/doc.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/go/ssa/doc.go b/go/ssa/doc.go
index afda476b369..a687de45e26 100644
--- a/go/ssa/doc.go
+++ b/go/ssa/doc.go
@@ -66,7 +66,6 @@
 // *FieldAddr ✔ ✔
 // *FreeVar ✔
 // *Function ✔ ✔ (func)
-// *GenericConvert ✔ ✔
 // *Global ✔ ✔ (var)
 // *Go ✔
 // *If ✔
@@ -80,6 +79,7 @@
 // *MakeMap ✔ ✔
 // *MakeSlice ✔ ✔
 // *MapUpdate ✔
+// *MultiConvert ✔ ✔
 // *NamedConst ✔ (const)
 // *Next ✔ ✔
 // *Panic ✔

From 242e5ed731aea726b30acd6919fb04e20db032fa Mon Sep 17 00:00:00 2001
From: "Bryan C. Mills" 
Date: Wed, 17 May 2023 11:23:36 -0400
Subject: [PATCH 038/109] cover: eliminate an unnecessary fsync in TestParseProfiles
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CL 179377 added TestParseProfiles, and used a call to (*os.File).Sync
instead of closing the file to flush its pending writes.

Unfortunately, on some filesystems Sync is fabulously expensive — it
may flush all pending writes everywhere on the filesystem, instead of
just flushing the writes to the one file, and flushes all the way to
disk even though this test really only needs the writes to be
observable in the same process.

Instead, we can simplify the test significantly by using os.WriteFile
to write and flush the file's contents.

Fixes golang/go#57481.

Change-Id: I7cda28fb6e9c8183dedadf79dbafe7e870ec0c42
Reviewed-on: https://go-review.googlesource.com/c/tools/+/495798
TryBot-Result: Gopher Robot
Reviewed-by: Than McIntosh
Auto-Submit: Bryan Mills
Run-TryBot: Bryan Mills
---
 cover/profile_test.go | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)

diff --git a/cover/profile_test.go b/cover/profile_test.go
index 3cecdacd508..925b397087a 100644
--- a/cover/profile_test.go
+++ b/cover/profile_test.go
@@ -6,8 +6,8 @@ package cover
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
+	"path/filepath"
 	"reflect"
 	"testing"
 )
@@ -208,26 +208,12 @@ some/fancy/path:42.69,44.16 2 -1`,
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			f, err := ioutil.TempFile("", "")
-			if err != nil {
-				t.Fatalf("Failed to create a temp file: %v.", err)
-			}
-			defer func() {
-				f.Close()
-				os.Remove(f.Name())
-			}()
-			n, err := f.WriteString(tc.input)
-			if err != nil {
-				t.Fatalf("Failed to write to temp file: %v", err)
-			}
-			if n < len(tc.input) {
-				t.Fatalf("Didn't write enough bytes to temp file (wrote %d, expected %d).", n, len(tc.input))
-			}
-			if err := f.Sync(); err != nil {
-				t.Fatalf("Failed to sync temp file: %v", err)
+			fname := filepath.Join(t.TempDir(), "test.cov")
+			if err := os.WriteFile(fname, []byte(tc.input), 0644); err != nil {
+				t.Fatal(err)
 			}
-			result, err := ParseProfiles(f.Name())
+			result, err := ParseProfiles(fname)
 			if err != nil {
 				if !tc.expectErr {
 					t.Errorf("Unexpected error: %v", err)

From 8b4b27bce44fbb5c7abb2e266b4a228e6a95a130 Mon Sep 17 00:00:00 2001
From: Jonathan Amsterdam 
Date: Wed, 17 May 2023 16:05:08 -0400
Subject: [PATCH 039/109] go/analysis/passes/slog: fix Group kv offset

The first argument to Group is a key, so its value in the kvFuncs map
should be 1, not zero.

The test was also calling Group incorrectly.
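
Concretely (these calls appear in the updated testdata below and are
shown here only for illustration), with the offset corrected to 1 the
checker treats argument 0 as the group key and checks key/value pairs
from argument 1 onward:

	_ = slog.Group("key", "a", 1, "b", 2) // ok: "a" and "b" are keys
	_ = slog.Group("key", "a", 1, 2, 3)   // reported: arg "2" is in a key position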
Change-Id: Iafb176b361d5fdc50c28cffe46f49dea29553c63 Reviewed-on: https://go-review.googlesource.com/c/tools/+/495976 Reviewed-by: Alan Donovan Run-TryBot: Jonathan Amsterdam TryBot-Result: Gopher Robot gopls-CI: kokoro --- go/analysis/passes/slog/slog.go | 2 +- go/analysis/passes/slog/testdata/src/a/a.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go/analysis/passes/slog/slog.go b/go/analysis/passes/slog/slog.go index 874ebecf069..8429eab9358 100644 --- a/go/analysis/passes/slog/slog.go +++ b/go/analysis/passes/slog/slog.go @@ -204,7 +204,7 @@ var kvFuncs = map[string]map[string]int{ "WarnCtx": 2, "ErrorCtx": 2, "Log": 3, - "Group": 0, + "Group": 1, }, "Logger": map[string]int{ "Debug": 1, diff --git a/go/analysis/passes/slog/testdata/src/a/a.go b/go/analysis/passes/slog/testdata/src/a/a.go index a13aac773d0..aa408d0e99a 100644 --- a/go/analysis/passes/slog/testdata/src/a/a.go +++ b/go/analysis/passes/slog/testdata/src/a/a.go @@ -143,7 +143,8 @@ func All() { r.Add(1, 2) // want `slog.Record.Add arg "1" should be a string or a slog.Attr` - _ = slog.Group("a", 1, 2, 3) // want `slog.Group arg "2" should be a string or a slog.Attr` + _ = slog.Group("key", "a", 1, "b", 2) + _ = slog.Group("key", "a", 1, 2, 3) // want `slog.Group arg "2" should be a string or a slog.Attr` } From 3d53c2d209e7055e29bfbc85770ce32db02b48af Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 15 May 2023 20:03:00 -0400 Subject: [PATCH 040/109] gopls/internal/lsp/cache: fix race in adhoc reloading To avoid inconsistent state where we load command-line-arguments packages for files that would be contained in an ad-hoc package, ensure that the view is loaded before doing file loads, when in ad-hoc mode. Along the way, introduce the concept of 'ViewType' discussed in our zero-config-gopls design (golang/go#57979). Furthermore, move certain data onto the immutable workspaceInformation type: - moduleMode depends only on ViewType - inGOPATH can be precomputed Updates golang/go#57979 Fixes golang/go#57209 Change-Id: If54cea65fbc72e6e704eccc6fe59d30ae5d01069 Reviewed-on: https://go-review.googlesource.com/c/tools/+/495256 TryBot-Result: Gopher Robot Run-TryBot: Robert Findley Reviewed-by: Alan Donovan gopls-CI: kokoro --- gopls/internal/lsp/cache/check.go | 2 +- gopls/internal/lsp/cache/snapshot.go | 50 ++++------- gopls/internal/lsp/cache/view.go | 88 ++++++++++++++++++- .../internal/regtest/workspace/adhoc_test.go | 42 +++++++++ 4 files changed, 147 insertions(+), 35 deletions(-) create mode 100644 gopls/internal/regtest/workspace/adhoc_test.go diff --git a/gopls/internal/lsp/cache/check.go b/gopls/internal/lsp/cache/check.go index 83ea17788a6..cf212c6e2e0 100644 --- a/gopls/internal/lsp/cache/check.go +++ b/gopls/internal/lsp/cache/check.go @@ -1190,7 +1190,7 @@ func (s *snapshot) typeCheckInputs(ctx context.Context, m *source.Metadata) (typ relatedInformation: s.view.Options().RelatedInformationSupported, linkTarget: s.view.Options().LinkTarget, - moduleMode: s.moduleMode(), + moduleMode: s.view.moduleMode(), }, nil } diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index 7988a72b738..64e1b55bbcf 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -327,47 +327,19 @@ func (s *snapshot) validBuildConfiguration() bool { if s.view.hasGopackagesDriver { return true } + // Check if the user is working within a module or if we have found // multiple modules in the workspace. 
if len(s.workspaceModFiles) > 0 { return true } - // The user may have a multiple directories in their GOPATH. - // Check if the workspace is within any of them. - // TODO(rfindley): this should probably be subject to "if GO111MODULES = off {...}". - for _, gp := range filepath.SplitList(s.view.gopath) { - if source.InDir(filepath.Join(gp, "src"), s.view.folder.Filename()) { - return true - } - } - return false -} - -// moduleMode reports whether the current snapshot uses Go modules. -// -// From https://go.dev/ref/mod, module mode is active if either of the -// following hold: -// - GO111MODULE=on -// - GO111MODULE=auto and we are inside a module or have a GOWORK value. -// -// Additionally, this method returns false if GOPACKAGESDRIVER is set. -// -// TODO(rfindley): use this more widely. -func (s *snapshot) moduleMode() bool { - // Since we only really understand the `go` command, if the user has a - // different GOPACKAGESDRIVER, assume that their configuration is valid. - if s.view.hasGopackagesDriver { - return false - } - switch s.view.effectiveGO111MODULE() { - case on: + // TODO(rfindley): this should probably be subject to "if GO111MODULES = off {...}". + if s.view.inGOPATH { return true - case off: - return false - default: - return len(s.workspaceModFiles) > 0 || s.view.gowork != "" } + + return false } // workspaceMode describes the way in which the snapshot's workspace should @@ -763,6 +735,18 @@ func (s *snapshot) MethodSets(ctx context.Context, ids ...PackageID) ([]*methods } func (s *snapshot) MetadataForFile(ctx context.Context, uri span.URI) ([]*source.Metadata, error) { + if s.view.ViewType() == AdHocView { + // As described in golang/go#57209, in ad-hoc workspaces (where we load ./ + // rather than ./...), preempting the directory load with file loads can + // lead to an inconsistent outcome, where certain files are loaded with + // command-line-arguments packages and others are loaded only in the ad-hoc + // package. Therefore, ensure that the workspace is loaded before doing any + // file loads. + if err := s.awaitLoaded(ctx); err != nil { + return nil, err + } + } + s.mu.Lock() // Start with the set of package associations derived from the last load. diff --git a/gopls/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go index d9991704a73..1dc13aaee8c 100644 --- a/gopls/internal/lsp/cache/view.go +++ b/gopls/internal/lsp/cache/view.go @@ -130,6 +130,10 @@ type workspaceInformation struct { // GOPACKAGESDRIVER environment variable or a gopackagesdriver binary on // their machine. hasGopackagesDriver bool + + // inGOPATH reports whether the workspace directory is contained in a GOPATH + // directory. + inGOPATH bool } // effectiveGO111MODULE reports the value of GO111MODULE effective in the go @@ -145,6 +149,79 @@ func (w workspaceInformation) effectiveGO111MODULE() go111module { } } +// A ViewType describes how we load package information for a view. +// +// This is used for constructing the go/packages.Load query, and for +// interpreting missing packages, imports, or errors. +// +// Each view has a ViewType which is derived from its immutable workspace +// information -- any environment change that would affect the view type +// results in a new view. +type ViewType int + +const ( + // GoPackagesDriverView is a view with a non-empty GOPACKAGESDRIVER + // environment variable. + GoPackagesDriverView ViewType = iota + + // GOPATHView is a view in GOPATH mode. + // + // I.e. in GOPATH, with GO111MODULE=off, or GO111MODULE=auto with no + // go.mod file. 
+ GOPATHView + + // GoModuleView is a view in module mode with a single Go module. + GoModuleView + + // GoWorkView is a view in module mode with a go.work file. + GoWorkView + + // An AdHocView is a collection of files in a given directory, not in GOPATH + // or a module. + AdHocView +) + +// ViewType derives the type of the view from its workspace information. +// +// TODO(rfindley): this logic is overlapping and slightly inconsistent with +// validBuildConfiguration. As part of zero-config-gopls (golang/go#57979), fix +// this inconsistency and consolidate on the ViewType abstraction. +func (w workspaceInformation) ViewType() ViewType { + if w.hasGopackagesDriver { + return GoPackagesDriverView + } + go111module := w.effectiveGO111MODULE() + if w.gowork != "" && go111module != off { + return GoWorkView + } + if w.gomod != "" && go111module != off { + return GoModuleView + } + if w.inGOPATH && go111module != on { + return GOPATHView + } + return AdHocView +} + +// moduleMode reports whether the current snapshot uses Go modules. +// +// From https://go.dev/ref/mod, module mode is active if either of the +// following hold: +// - GO111MODULE=on +// - GO111MODULE=auto and we are inside a module or have a GOWORK value. +// +// Additionally, this method returns false if GOPACKAGESDRIVER is set. +// +// TODO(rfindley): use this more widely. +func (w workspaceInformation) moduleMode() bool { + switch w.ViewType() { + case GoModuleView, GoWorkView: + return true + default: + return false + } +} + // GOWORK returns the effective GOWORK value for this workspace, if // any, in URI form. // @@ -740,6 +817,8 @@ func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) (loadEr }) } + // TODO(rfindley): this should be predicated on the s.view.moduleMode(). + // There is no point loading ./... if we have an empty go.work. if len(s.workspaceModFiles) > 0 { for modURI := range s.workspaceModFiles { // Verify that the modfile is valid before trying to load it. @@ -881,7 +960,7 @@ func (s *Session) getWorkspaceInformation(ctx context.Context, folder span.URI, if err != nil { return info, err } - if err := info.goEnv.load(ctx, folder.Filename(), options.EnvSlice(), s.gocmdRunner); err != nil { + if err := info.load(ctx, folder.Filename(), options.EnvSlice(), s.gocmdRunner); err != nil { return info, err } // The value of GOPACKAGESDRIVER is not returned through the go command. @@ -899,6 +978,13 @@ func (s *Session) getWorkspaceInformation(ctx context.Context, folder span.URI, return info, err } + // Check if the workspace is within any GOPATH directory. + for _, gp := range filepath.SplitList(info.gopath) { + if source.InDir(filepath.Join(gp, "src"), folder.Filename()) { + info.inGOPATH = true + break + } + } return info, nil } diff --git a/gopls/internal/regtest/workspace/adhoc_test.go b/gopls/internal/regtest/workspace/adhoc_test.go new file mode 100644 index 00000000000..d726242c48d --- /dev/null +++ b/gopls/internal/regtest/workspace/adhoc_test.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package workspace + +import ( + "testing" + + . "golang.org/x/tools/gopls/internal/lsp/regtest" + "golang.org/x/tools/internal/testenv" +) + +// Test for golang/go#57209: editing a file in an ad-hoc package should not +// trigger conflicting diagnostics. 
+func TestAdhoc_Edits(t *testing.T) {
+	testenv.NeedsGo1Point(t, 18)
+
+	const files = `
+-- a.go --
+package foo
+
+const X = 1
+
+-- b.go --
+package foo
+
+// import "errors"
+
+const Y = X
+`
+
+	Run(t, files, func(t *testing.T, env *Env) {
+		env.OpenFile("b.go")
+
+		for i := 0; i < 10; i++ {
+			env.RegexpReplace("b.go", `// import "errors"`, `import "errors"`)
+			env.RegexpReplace("b.go", `import "errors"`, `// import "errors"`)
+			env.AfterChange(NoDiagnostics())
+		}
+	})
+}

From a069704d01a7b95c33b311cd2eae5fcea1a6dac2 Mon Sep 17 00:00:00 2001
From: Alan Donovan 
Date: Wed, 17 May 2023 12:23:15 -0400
Subject: [PATCH 041/109] gopls/internal/lsp/filecache: avoid flock

This CL changes the implementation of the filecache to use a scheme
similar to that used by the go command's cache. Instead of atomic
rename(2) or flock(2), it relies on the atomicity in practice of
writes to small files (in our case 32 bytes, the size of a SHA256
hash).

A cache entry now consists of two files: a kind="cas" file that holds
the cache value, keyed by its SHA256 hash, and an index file, whose
name is formed from the user-provided kind and key, and whose content
is the SHA256 hash that is a key into the CAS.

Writes to the CAS may race, so we check the integrity of everything we
read back from it using SHA256. Writes to the index files may also
race, but we assume that small writes are in practice atomic.

The memory-based LRU cache has been temporarily disabled so that we
can soak test the new implementation for a while. We expect this to be
significantly more reliable, and also faster.

Change-Id: I25cf341b90c985dcab015df770be579ea786bd06
Reviewed-on: https://go-review.googlesource.com/c/tools/+/495800
Run-TryBot: Alan Donovan
Reviewed-by: Robert Findley
TryBot-Result: Gopher Robot
gopls-CI: kokoro
Auto-Submit: Alan Donovan
---
 gopls/internal/lsp/filecache/filecache.go     | 197 ++++++++-----
 .../lockedfile/internal/filelock/filelock.go  |  99 -------
 .../internal/filelock/filelock_fcntl.go       | 215 --------------
 .../internal/filelock/filelock_other.go       |  37 ---
 .../internal/filelock/filelock_plan9.go       |  37 ---
 .../internal/filelock/filelock_test.go        | 209 --------------
 .../internal/filelock/filelock_unix.go        |  45 ---
 .../internal/filelock/filelock_windows.go     |  67 -----
 internal/lockedfile/lockedfile.go             | 187 ------------
 internal/lockedfile/lockedfile_filelock.go    |  66 -----
 internal/lockedfile/lockedfile_plan9.go       |  95 -------
 internal/lockedfile/lockedfile_test.go        | 268 ------------------
 internal/lockedfile/mutex.go                  |  67 -----
 internal/lockedfile/transform_test.go         | 104 -------
 14 files changed, 123 insertions(+), 1570 deletions(-)
 delete mode 100644 internal/lockedfile/internal/filelock/filelock.go
 delete mode 100644 internal/lockedfile/internal/filelock/filelock_fcntl.go
 delete mode 100644 internal/lockedfile/internal/filelock/filelock_other.go
 delete mode 100644 internal/lockedfile/internal/filelock/filelock_plan9.go
 delete mode 100644 internal/lockedfile/internal/filelock/filelock_test.go
 delete mode 100644 internal/lockedfile/internal/filelock/filelock_unix.go
 delete mode 100644 internal/lockedfile/internal/filelock/filelock_windows.go
 delete mode 100644 internal/lockedfile/lockedfile.go
 delete mode 100644 internal/lockedfile/lockedfile_filelock.go
 delete mode 100644 internal/lockedfile/lockedfile_plan9.go
 delete mode 100644 internal/lockedfile/lockedfile_test.go
 delete mode 100644 internal/lockedfile/mutex.go
 delete mode 100644 internal/lockedfile/transform_test.go
diff --git 
a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go index c4e2ce4a27f..bc076aa2ec1 100644 --- a/gopls/internal/lsp/filecache/filecache.go +++ b/gopls/internal/lsp/filecache/filecache.go @@ -23,17 +23,14 @@ package filecache import ( "bytes" "crypto/sha256" - "encoding/binary" "encoding/hex" "errors" "fmt" - "hash/crc32" "io" "io/fs" "log" "os" "path/filepath" - "runtime" "sort" "sync" "sync/atomic" @@ -41,7 +38,6 @@ import ( "golang.org/x/tools/gopls/internal/bug" "golang.org/x/tools/gopls/internal/lsp/lru" - "golang.org/x/tools/internal/lockedfile" ) // Start causes the filecache to initialize and start garbage gollection. @@ -62,6 +58,8 @@ type memKey struct { key [32]byte } +const useMemCache = false // disabled for now while we debug the new file-based implementation + // Get retrieves from the cache and returns a newly allocated // copy of the value most recently supplied to Set(kind, key), // possibly by another process. @@ -70,67 +68,71 @@ func Get(kind string, key [32]byte) ([]byte, error) { // First consult the read-through memory cache. // Note that memory cache hits do not update the times // used for LRU eviction of the file-based cache. - if value := memCache.Get(memKey{kind, key}); value != nil { - return value.([]byte), nil + if useMemCache { + if value := memCache.Get(memKey{kind, key}); value != nil { + return value.([]byte), nil + } } iolimit <- struct{}{} // acquire a token defer func() { <-iolimit }() // release a token - name, err := filename(kind, key) + // Read the index file, which provides the name of the CAS file. + indexName, err := filename(kind, key) if err != nil { return nil, err } - data, err := lockedfile.Read(name) + indexData, err := os.ReadFile(indexName) if err != nil { if errors.Is(err, os.ErrNotExist) { return nil, ErrNotFound } return nil, err } - - // Verify that the Write was complete - // by checking the recorded length. - if len(data) < 8+4 { - return nil, ErrNotFound // cache entry is incomplete - } - length, value, checksum := data[:8], data[8:len(data)-4], data[len(data)-4:] - if binary.LittleEndian.Uint64(length) != uint64(len(value)) { - return nil, ErrNotFound // cache entry is incomplete (or too long!) + var valueHash [32]byte + if copy(valueHash[:], indexData) != len(valueHash) { + return nil, ErrNotFound // index entry has wrong length } - // Check for corruption and print the entire file content; see - // issue #59289. TODO(adonovan): stop printing the entire file - // once we've seen enough reports to understand the pattern. - if binary.LittleEndian.Uint32(checksum) != crc32.ChecksumIEEE(value) { - // Darwin has repeatedly displayed a problem (#59895) - // whereby the checksum portion (and only it) is zero, - // which suggests a bug in its file system . Don't - // panic, but keep an eye on other failures for now. - errorf := bug.Errorf - if binary.LittleEndian.Uint32(checksum) == 0 && runtime.GOOS == "darwin" { - errorf = fmt.Errorf - } - - return nil, errorf("internal error in filecache.Get(%q, %x): invalid checksum at end of %d-byte file %s:\n%q", - kind, key, len(data), name, data) + // Read the CAS file and check its contents match. + // + // This ensures integrity in all cases (corrupt or truncated + // file, short read, I/O error, wrong length, etc) except an + // engineered hash collision, which is infeasible. 
+ casName, err := filename(casKind, valueHash) + if err != nil { + return nil, err + } + value, _ := os.ReadFile(casName) // ignore error + if sha256.Sum256(value) != valueHash { + return nil, ErrNotFound // CAS file is missing or has wrong contents } - // Update file time for use by LRU eviction. - // (This turns every read into a write operation. + // Update file times used by LRU eviction. + // + // This turns every read into a write operation. // If this is a performance problem, we should - // touch the files aynchronously.) + // touch the files asynchronously, or, follow + // the approach used in the go command's cache + // and update only if the existing timestamp is + // older than, say, one hour. // // (Traditionally the access time would be updated // automatically, but for efficiency most POSIX systems have // for many years set the noatime mount option to avoid every // open or read operation entailing a metadata write.) now := time.Now() - if err := os.Chtimes(name, now, now); err != nil { - return nil, fmt.Errorf("failed to update access time: %w", err) + if err := os.Chtimes(indexName, now, now); err != nil { + return nil, fmt.Errorf("failed to update access time of index file: %w", err) + } + if err := os.Chtimes(casName, now, now); err != nil { + return nil, fmt.Errorf("failed to update access time of CAS file: %w", err) + } + + if useMemCache { + memCache.Set(memKey{kind, key}, value, len(value)) } - memCache.Set(memKey{kind, key}, value, len(value)) return value, nil } @@ -140,50 +142,69 @@ var ErrNotFound = fmt.Errorf("not found") // Set updates the value in the cache. func Set(kind string, key [32]byte, value []byte) error { - memCache.Set(memKey{kind, key}, value, len(value)) + if useMemCache { + memCache.Set(memKey{kind, key}, value, len(value)) + } iolimit <- struct{}{} // acquire a token defer func() { <-iolimit }() // release a token - name, err := filename(kind, key) + // First, add the value to the content- + // addressable store (CAS), if not present. + hash := sha256.Sum256(value) + casName, err := filename(casKind, hash) if err != nil { return err } - if err := os.MkdirAll(filepath.Dir(name), 0700); err != nil { + // Does CAS file exist and have correct (complete) content? + // TODO(adonovan): opt: use mmap for this check. + if prev, _ := os.ReadFile(casName); !bytes.Equal(prev, value) { + if err := os.MkdirAll(filepath.Dir(casName), 0700); err != nil { + return err + } + // Avoiding O_TRUNC here is merely an optimization to avoid + // cache misses when two threads race to write the same file. + if err := writeFileNoTrunc(casName, value, 0666); err != nil { + os.Remove(casName) // ignore error + return err // e.g. disk full + } + } + + // Now write an index entry that refers to the CAS file. + indexName, err := filename(kind, key) + if err != nil { return err } + if err := os.MkdirAll(filepath.Dir(indexName), 0700); err != nil { + return err + } + if err := writeFileNoTrunc(indexName, hash[:], 0666); err != nil { + os.Remove(indexName) // ignore error + return err // e.g. disk full + } - // In the unlikely event of a short write (e.g. ENOSPC) - // followed by process termination (e.g. a power cut), we - // don't want a reader to see a short file, so we record - // the expected length first and verify it in Get. 
- var length [8]byte - binary.LittleEndian.PutUint64(length[:], uint64(len(value))) - - // Occasional file corruption (presence of zero bytes in JSON - // files) has been reported on macOS (see issue #59289), - // assumed due to a nonatomicity problem in the file system. - // Ideally the macOS kernel would be fixed, or lockedfile - // would implement a workaround (since its job is to provide - // reliable the mutual exclusion primitive that allows - // cooperating gopls processes to implement transactional - // file replacement), but for now we add an extra integrity - // check: a 32-bit checksum at the end. - var checksum [4]byte - binary.LittleEndian.PutUint32(checksum[:], crc32.ChecksumIEEE(value)) - - // Windows doesn't support atomic rename--we tried MoveFile, - // MoveFileEx, ReplaceFileEx, and SetFileInformationByHandle - // of RenameFileInfo, all to no avail--so instead we use - // advisory file locking, which is only about 2x slower even - // on POSIX platforms with atomic rename. - return lockedfile.Write(name, io.MultiReader( - bytes.NewReader(length[:]), - bytes.NewReader(value), - bytes.NewReader(checksum[:])), - 0600) + return nil } +// writeFileNoTrunc is like os.WriteFile but doesn't truncate until +// after the write, so that racing writes of the same data are idempotent. +func writeFileNoTrunc(filename string, data []byte, perm os.FileMode) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, perm) + if err != nil { + return err + } + _, err = f.Write(data) + if err == nil { + err = f.Truncate(int64(len(data))) + } + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} + +const casKind = "cas" + var iolimit = make(chan struct{}, 128) // counting semaphore to limit I/O concurrency in Set. var budget int64 = 1e9 // 1GB @@ -204,9 +225,9 @@ func SetBudget(new int64) (old int64) { // --- implementation ---- -// filename returns the cache entry of the specified kind and key. +// filename returns the name of the cache file of the specified kind and key. // -// A typical cache entry is a file name such as: +// A typical cache file has a name such as: // // $HOME/Library/Caches / gopls / VVVVVVVV / kind / KK / KKKK...KKKK // @@ -218,8 +239,33 @@ func SetBudget(new int64) (old int64) { // - The first 8 bits of the key, to avoid huge directories. // - The full 256 bits of the key. // -// Once a file is written its contents are never modified, though it -// may be atomically replaced or removed. +// Previous iterations of the design aimed for the invariant that once +// a file is written, its contents are never modified, though it may +// be atomically replaced or removed. However, not all platforms have +// an atomic rename operation (our first approach), and file locking +// (our second) is a notoriously fickle mechanism. +// +// The current design instead exploits a trick from the cache +// implementation used by the go command: writes of small files are in +// practice atomic (all or nothing) on all platforms. +// (See GOROOT/src/cmd/go/internal/cache/cache.go.) +// +// We use a two-level scheme consisting of an index and a +// content-addressable store (CAS). A single cache entry consists of +// two files. The value of a cache entry is written into the file at +// filename("cas", sha256(value)). Since the value may be arbitrarily +// large, this write is not atomic. That means we must check the +// integrity of the contents read back from the CAS to make sure they +// hash to the expected key. 
If the CAS file is incomplete or +// inconsistent, we proceed as if it were missing. +// +// Once the CAS file has been written, we write a small fixed-size +// index file at filename(kind, key), using the values supplied by the +// caller. The index file contains the hash that identifies the value +// file in the CAS. (We could add a small amount of extra metadata to +// this file if later desired.) Because the index file is small, +// concurrent writes to it are atomic in practice, even though this is +// not guaranteed by any OS. // // New versions of gopls are free to reorganize the contents of the // version directory as needs evolve. But all versions of gopls must @@ -229,6 +275,9 @@ func SetBudget(new int64) (old int64) { // the entire gopls directory so that newer binaries can clean up // after older ones: in the development cycle especially, new // new versions may be created frequently. + +// TODO(adonovan): opt: use "VVVVVVVV / KK / KKKK...KKKK-kind" to +// avoid creating 256 directories per distinct kind (+ cas). func filename(kind string, key [32]byte) (string, error) { hex := fmt.Sprintf("%x", key) dir, err := getCacheDir() diff --git a/internal/lockedfile/internal/filelock/filelock.go b/internal/lockedfile/internal/filelock/filelock.go deleted file mode 100644 index 05f27c321a8..00000000000 --- a/internal/lockedfile/internal/filelock/filelock.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package filelock provides a platform-independent API for advisory file -// locking. Calls to functions in this package on platforms that do not support -// advisory locks will return errors for which IsNotSupported returns true. -package filelock - -import ( - "errors" - "io/fs" - "os" -) - -// A File provides the minimal set of methods required to lock an open file. -// File implementations must be usable as map keys. -// The usual implementation is *os.File. -type File interface { - // Name returns the name of the file. - Name() string - - // Fd returns a valid file descriptor. - // (If the File is an *os.File, it must not be closed.) - Fd() uintptr - - // Stat returns the FileInfo structure describing file. - Stat() (fs.FileInfo, error) -} - -// Lock places an advisory write lock on the file, blocking until it can be -// locked. -// -// If Lock returns nil, no other process will be able to place a read or write -// lock on the file until this process exits, closes f, or calls Unlock on it. -// -// If f's descriptor is already read- or write-locked, the behavior of Lock is -// unspecified. -// -// Closing the file may or may not release the lock promptly. Callers should -// ensure that Unlock is always called when Lock succeeds. -func Lock(f File) error { - return lock(f, writeLock) -} - -// RLock places an advisory read lock on the file, blocking until it can be locked. -// -// If RLock returns nil, no other process will be able to place a write lock on -// the file until this process exits, closes f, or calls Unlock on it. -// -// If f is already read- or write-locked, the behavior of RLock is unspecified. -// -// Closing the file may or may not release the lock promptly. Callers should -// ensure that Unlock is always called if RLock succeeds. -func RLock(f File) error { - return lock(f, readLock) -} - -// Unlock removes an advisory lock placed on f by this process. 
-// -// The caller must not attempt to unlock a file that is not locked. -func Unlock(f File) error { - return unlock(f) -} - -// String returns the name of the function corresponding to lt -// (Lock, RLock, or Unlock). -func (lt lockType) String() string { - switch lt { - case readLock: - return "RLock" - case writeLock: - return "Lock" - default: - return "Unlock" - } -} - -// IsNotSupported returns a boolean indicating whether the error is known to -// report that a function is not supported (possibly for a specific input). -// It is satisfied by ErrNotSupported as well as some syscall errors. -func IsNotSupported(err error) bool { - return isNotSupported(underlyingError(err)) -} - -var ErrNotSupported = errors.New("operation not supported") - -// underlyingError returns the underlying error for known os error types. -func underlyingError(err error) error { - switch err := err.(type) { - case *fs.PathError: - return err.Err - case *os.LinkError: - return err.Err - case *os.SyscallError: - return err.Err - } - return err -} diff --git a/internal/lockedfile/internal/filelock/filelock_fcntl.go b/internal/lockedfile/internal/filelock/filelock_fcntl.go deleted file mode 100644 index 30985191072..00000000000 --- a/internal/lockedfile/internal/filelock/filelock_fcntl.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build aix || (solaris && !illumos) -// +build aix solaris,!illumos - -// This code implements the filelock API using POSIX 'fcntl' locks, which attach -// to an (inode, process) pair rather than a file descriptor. To avoid unlocking -// files prematurely when the same file is opened through different descriptors, -// we allow only one read-lock at a time. -// -// Most platforms provide some alternative API, such as an 'flock' system call -// or an F_OFD_SETLK command for 'fcntl', that allows for better concurrency and -// does not require per-inode bookkeeping in the application. - -package filelock - -import ( - "errors" - "io" - "io/fs" - "math/rand" - "sync" - "syscall" - "time" -) - -type lockType int16 - -const ( - readLock lockType = syscall.F_RDLCK - writeLock lockType = syscall.F_WRLCK -) - -type inode = uint64 // type of syscall.Stat_t.Ino - -type inodeLock struct { - owner File - queue []<-chan File -} - -var ( - mu sync.Mutex - inodes = map[File]inode{} - locks = map[inode]inodeLock{} -) - -func lock(f File, lt lockType) (err error) { - // POSIX locks apply per inode and process, and the lock for an inode is - // released when *any* descriptor for that inode is closed. So we need to - // synchronize access to each inode internally, and must serialize lock and - // unlock calls that refer to the same inode through different descriptors. - fi, err := f.Stat() - if err != nil { - return err - } - ino := fi.Sys().(*syscall.Stat_t).Ino - - mu.Lock() - if i, dup := inodes[f]; dup && i != ino { - mu.Unlock() - return &fs.PathError{ - Op: lt.String(), - Path: f.Name(), - Err: errors.New("inode for file changed since last Lock or RLock"), - } - } - inodes[f] = ino - - var wait chan File - l := locks[ino] - if l.owner == f { - // This file already owns the lock, but the call may change its lock type. - } else if l.owner == nil { - // No owner: it's ours now. - l.owner = f - } else { - // Already owned: add a channel to wait on. 
- wait = make(chan File) - l.queue = append(l.queue, wait) - } - locks[ino] = l - mu.Unlock() - - if wait != nil { - wait <- f - } - - // Spurious EDEADLK errors arise on platforms that compute deadlock graphs at - // the process, rather than thread, level. Consider processes P and Q, with - // threads P.1, P.2, and Q.3. The following trace is NOT a deadlock, but will be - // reported as a deadlock on systems that consider only process granularity: - // - // P.1 locks file A. - // Q.3 locks file B. - // Q.3 blocks on file A. - // P.2 blocks on file B. (This is erroneously reported as a deadlock.) - // P.1 unlocks file A. - // Q.3 unblocks and locks file A. - // Q.3 unlocks files A and B. - // P.2 unblocks and locks file B. - // P.2 unlocks file B. - // - // These spurious errors were observed in practice on AIX and Solaris in - // cmd/go: see https://golang.org/issue/32817. - // - // We work around this bug by treating EDEADLK as always spurious. If there - // really is a lock-ordering bug between the interacting processes, it will - // become a livelock instead, but that's not appreciably worse than if we had - // a proper flock implementation (which generally does not even attempt to - // diagnose deadlocks). - // - // In the above example, that changes the trace to: - // - // P.1 locks file A. - // Q.3 locks file B. - // Q.3 blocks on file A. - // P.2 spuriously fails to lock file B and goes to sleep. - // P.1 unlocks file A. - // Q.3 unblocks and locks file A. - // Q.3 unlocks files A and B. - // P.2 wakes up and locks file B. - // P.2 unlocks file B. - // - // We know that the retry loop will not introduce a *spurious* livelock - // because, according to the POSIX specification, EDEADLK is only to be - // returned when “the lock is blocked by a lock from another process”. - // If that process is blocked on some lock that we are holding, then the - // resulting livelock is due to a real deadlock (and would manifest as such - // when using, for example, the flock implementation of this package). - // If the other process is *not* blocked on some other lock that we are - // holding, then it will eventually release the requested lock. - - nextSleep := 1 * time.Millisecond - const maxSleep = 500 * time.Millisecond - for { - err = setlkw(f.Fd(), lt) - if err != syscall.EDEADLK { - break - } - time.Sleep(nextSleep) - - nextSleep += nextSleep - if nextSleep > maxSleep { - nextSleep = maxSleep - } - // Apply 10% jitter to avoid synchronizing collisions when we finally unblock. - nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep)) - } - - if err != nil { - unlock(f) - return &fs.PathError{ - Op: lt.String(), - Path: f.Name(), - Err: err, - } - } - - return nil -} - -func unlock(f File) error { - var owner File - - mu.Lock() - ino, ok := inodes[f] - if ok { - owner = locks[ino].owner - } - mu.Unlock() - - if owner != f { - panic("unlock called on a file that is not locked") - } - - err := setlkw(f.Fd(), syscall.F_UNLCK) - - mu.Lock() - l := locks[ino] - if len(l.queue) == 0 { - // No waiters: remove the map entry. - delete(locks, ino) - } else { - // The first waiter is sending us their file now. - // Receive it and update the queue. - l.owner = <-l.queue[0] - l.queue = l.queue[1:] - locks[ino] = l - } - delete(inodes, f) - mu.Unlock() - - return err -} - -// setlkw calls FcntlFlock with F_SETLKW for the entire file indicated by fd. 
-func setlkw(fd uintptr, lt lockType) error { - for { - err := syscall.FcntlFlock(fd, syscall.F_SETLKW, &syscall.Flock_t{ - Type: int16(lt), - Whence: io.SeekStart, - Start: 0, - Len: 0, // All bytes. - }) - if err != syscall.EINTR { - return err - } - } -} - -func isNotSupported(err error) bool { - return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported -} diff --git a/internal/lockedfile/internal/filelock/filelock_other.go b/internal/lockedfile/internal/filelock/filelock_other.go deleted file mode 100644 index cde868f49b0..00000000000 --- a/internal/lockedfile/internal/filelock/filelock_other.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !(aix || darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd || solaris) && !plan9 && !windows -// +build !aix,!darwin,!dragonfly,!freebsd,!illumos,!linux,!netbsd,!openbsd,!solaris,!plan9,!windows - -package filelock - -import "io/fs" - -type lockType int8 - -const ( - readLock = iota + 1 - writeLock -) - -func lock(f File, lt lockType) error { - return &fs.PathError{ - Op: lt.String(), - Path: f.Name(), - Err: ErrNotSupported, - } -} - -func unlock(f File) error { - return &fs.PathError{ - Op: "Unlock", - Path: f.Name(), - Err: ErrNotSupported, - } -} - -func isNotSupported(err error) bool { - return err == ErrNotSupported -} diff --git a/internal/lockedfile/internal/filelock/filelock_plan9.go b/internal/lockedfile/internal/filelock/filelock_plan9.go deleted file mode 100644 index 908afb6c8cb..00000000000 --- a/internal/lockedfile/internal/filelock/filelock_plan9.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build plan9 -// +build plan9 - -package filelock - -import "io/fs" - -type lockType int8 - -const ( - readLock = iota + 1 - writeLock -) - -func lock(f File, lt lockType) error { - return &fs.PathError{ - Op: lt.String(), - Path: f.Name(), - Err: ErrNotSupported, - } -} - -func unlock(f File) error { - return &fs.PathError{ - Op: "Unlock", - Path: f.Name(), - Err: ErrNotSupported, - } -} - -func isNotSupported(err error) bool { - return err == ErrNotSupported -} diff --git a/internal/lockedfile/internal/filelock/filelock_test.go b/internal/lockedfile/internal/filelock/filelock_test.go deleted file mode 100644 index 6c3f3933f33..00000000000 --- a/internal/lockedfile/internal/filelock/filelock_test.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows -// +build unix aix darwin dragonfly freebsd linux netbsd openbsd solaris windows - -package filelock_test - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "testing" - "time" - - "golang.org/x/tools/internal/lockedfile/internal/filelock" -) - -func lock(t *testing.T, f *os.File) { - t.Helper() - err := filelock.Lock(f) - t.Logf("Lock(fd %d) = %v", f.Fd(), err) - if err != nil { - t.Fail() - } -} - -func rLock(t *testing.T, f *os.File) { - t.Helper() - err := filelock.RLock(f) - t.Logf("RLock(fd %d) = %v", f.Fd(), err) - if err != nil { - t.Fail() - } -} - -func unlock(t *testing.T, f *os.File) { - t.Helper() - err := filelock.Unlock(f) - t.Logf("Unlock(fd %d) = %v", f.Fd(), err) - if err != nil { - t.Fail() - } -} - -func mustTempFile(t *testing.T) (f *os.File, remove func()) { - t.Helper() - - base := filepath.Base(t.Name()) - f, err := os.CreateTemp("", base) - if err != nil { - t.Fatalf(`os.CreateTemp("", %q) = %v`, base, err) - } - t.Logf("fd %d = %s", f.Fd(), f.Name()) - - return f, func() { - f.Close() - os.Remove(f.Name()) - } -} - -func mustOpen(t *testing.T, name string) *os.File { - t.Helper() - - f, err := os.OpenFile(name, os.O_RDWR, 0) - if err != nil { - t.Fatalf("os.Open(%q) = %v", name, err) - } - - t.Logf("fd %d = os.Open(%q)", f.Fd(), name) - return f -} - -const ( - quiescent = 10 * time.Millisecond - probablyStillBlocked = 10 * time.Second -) - -func mustBlock(t *testing.T, op string, f *os.File) (wait func(*testing.T)) { - t.Helper() - - desc := fmt.Sprintf("%s(fd %d)", op, f.Fd()) - - done := make(chan struct{}) - go func() { - t.Helper() - switch op { - case "Lock": - lock(t, f) - case "RLock": - rLock(t, f) - default: - panic("invalid op: " + op) - } - close(done) - }() - - select { - case <-done: - t.Fatalf("%s unexpectedly did not block", desc) - return nil - - case <-time.After(quiescent): - t.Logf("%s is blocked (as expected)", desc) - return func(t *testing.T) { - t.Helper() - select { - case <-time.After(probablyStillBlocked): - t.Fatalf("%s is unexpectedly still blocked", desc) - case <-done: - } - } - } -} - -func TestLockExcludesLock(t *testing.T) { - t.Parallel() - - f, remove := mustTempFile(t) - defer remove() - - other := mustOpen(t, f.Name()) - defer other.Close() - - lock(t, f) - lockOther := mustBlock(t, "Lock", other) - unlock(t, f) - lockOther(t) - unlock(t, other) -} - -func TestLockExcludesRLock(t *testing.T) { - t.Parallel() - - f, remove := mustTempFile(t) - defer remove() - - other := mustOpen(t, f.Name()) - defer other.Close() - - lock(t, f) - rLockOther := mustBlock(t, "RLock", other) - unlock(t, f) - rLockOther(t) - unlock(t, other) -} - -func TestRLockExcludesOnlyLock(t *testing.T) { - t.Parallel() - - f, remove := mustTempFile(t) - defer remove() - rLock(t, f) - - f2 := mustOpen(t, f.Name()) - defer f2.Close() - - doUnlockTF := false - switch runtime.GOOS { - case "aix", "solaris": - // When using POSIX locks (as on Solaris), we can't safely read-lock the - // same inode through two different descriptors at the same time: when the - // first descriptor is closed, the second descriptor would still be open but - // silently unlocked. So a second RLock must block instead of proceeding. 
- lockF2 := mustBlock(t, "RLock", f2) - unlock(t, f) - lockF2(t) - default: - rLock(t, f2) - doUnlockTF = true - } - - other := mustOpen(t, f.Name()) - defer other.Close() - lockOther := mustBlock(t, "Lock", other) - - unlock(t, f2) - if doUnlockTF { - unlock(t, f) - } - lockOther(t) - unlock(t, other) -} - -func TestLockNotDroppedByExecCommand(t *testing.T) { - f, remove := mustTempFile(t) - defer remove() - - lock(t, f) - - other := mustOpen(t, f.Name()) - defer other.Close() - - // Some kinds of file locks are dropped when a duplicated or forked file - // descriptor is unlocked. Double-check that the approach used by os/exec does - // not accidentally drop locks. - cmd := exec.Command(os.Args[0], "-test.run=^$") - if err := cmd.Run(); err != nil { - t.Fatalf("exec failed: %v", err) - } - - lockOther := mustBlock(t, "Lock", other) - unlock(t, f) - lockOther(t) - unlock(t, other) -} diff --git a/internal/lockedfile/internal/filelock/filelock_unix.go b/internal/lockedfile/internal/filelock/filelock_unix.go deleted file mode 100644 index 878a1e770d4..00000000000 --- a/internal/lockedfile/internal/filelock/filelock_unix.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd -// +build darwin dragonfly freebsd illumos linux netbsd openbsd - -package filelock - -import ( - "io/fs" - "syscall" -) - -type lockType int16 - -const ( - readLock lockType = syscall.LOCK_SH - writeLock lockType = syscall.LOCK_EX -) - -func lock(f File, lt lockType) (err error) { - for { - err = syscall.Flock(int(f.Fd()), int(lt)) - if err != syscall.EINTR { - break - } - } - if err != nil { - return &fs.PathError{ - Op: lt.String(), - Path: f.Name(), - Err: err, - } - } - return nil -} - -func unlock(f File) error { - return lock(f, syscall.LOCK_UN) -} - -func isNotSupported(err error) bool { - return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported -} diff --git a/internal/lockedfile/internal/filelock/filelock_windows.go b/internal/lockedfile/internal/filelock/filelock_windows.go deleted file mode 100644 index 3273a818272..00000000000 --- a/internal/lockedfile/internal/filelock/filelock_windows.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package filelock - -import ( - "io/fs" - - "golang.org/x/sys/windows" -) - -type lockType uint32 - -const ( - readLock lockType = 0 - writeLock lockType = windows.LOCKFILE_EXCLUSIVE_LOCK -) - -const ( - reserved = 0 - allBytes = ^uint32(0) -) - -func lock(f File, lt lockType) error { - // Per https://golang.org/issue/19098, “Programs currently expect the Fd - // method to return a handle that uses ordinary synchronous I/O.” - // However, LockFileEx still requires an OVERLAPPED structure, - // which contains the file offset of the beginning of the lock range. - // We want to lock the entire file, so we leave the offset as zero. 
- ol := new(windows.Overlapped) - - err := windows.LockFileEx(windows.Handle(f.Fd()), uint32(lt), reserved, allBytes, allBytes, ol) - if err != nil { - return &fs.PathError{ - Op: lt.String(), - Path: f.Name(), - Err: err, - } - } - return nil -} - -func unlock(f File) error { - ol := new(windows.Overlapped) - err := windows.UnlockFileEx(windows.Handle(f.Fd()), reserved, allBytes, allBytes, ol) - if err != nil { - return &fs.PathError{ - Op: "Unlock", - Path: f.Name(), - Err: err, - } - } - return nil -} - -func isNotSupported(err error) bool { - switch err { - case windows.ERROR_NOT_SUPPORTED, windows.ERROR_CALL_NOT_IMPLEMENTED, ErrNotSupported: - return true - default: - return false - } -} diff --git a/internal/lockedfile/lockedfile.go b/internal/lockedfile/lockedfile.go deleted file mode 100644 index 82e1a89675e..00000000000 --- a/internal/lockedfile/lockedfile.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package lockedfile creates and manipulates files whose contents should only -// change atomically. -package lockedfile - -import ( - "fmt" - "io" - "io/fs" - "os" - "runtime" -) - -// A File is a locked *os.File. -// -// Closing the file releases the lock. -// -// If the program exits while a file is locked, the operating system releases -// the lock but may not do so promptly: callers must ensure that all locked -// files are closed before exiting. -type File struct { - osFile - closed bool -} - -// osFile embeds a *os.File while keeping the pointer itself unexported. -// (When we close a File, it must be the same file descriptor that we opened!) -type osFile struct { - *os.File -} - -// OpenFile is like os.OpenFile, but returns a locked file. -// If flag includes os.O_WRONLY or os.O_RDWR, the file is write-locked; -// otherwise, it is read-locked. -func OpenFile(name string, flag int, perm fs.FileMode) (*File, error) { - var ( - f = new(File) - err error - ) - f.osFile.File, err = openFile(name, flag, perm) - if err != nil { - return nil, err - } - - // Although the operating system will drop locks for open files when the go - // command exits, we want to hold locks for as little time as possible, and we - // especially don't want to leave a file locked after we're done with it. Our - // Close method is what releases the locks, so use a finalizer to report - // missing Close calls on a best-effort basis. - runtime.SetFinalizer(f, func(f *File) { - panic(fmt.Sprintf("lockedfile.File %s became unreachable without a call to Close", f.Name())) - }) - - return f, nil -} - -// Open is like os.Open, but returns a read-locked file. -func Open(name string) (*File, error) { - return OpenFile(name, os.O_RDONLY, 0) -} - -// Create is like os.Create, but returns a write-locked file. -func Create(name string) (*File, error) { - return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) -} - -// Edit creates the named file with mode 0666 (before umask), -// but does not truncate existing contents. -// -// If Edit succeeds, methods on the returned File can be used for I/O. -// The associated file descriptor has mode O_RDWR and the file is write-locked. -func Edit(name string) (*File, error) { - return OpenFile(name, os.O_RDWR|os.O_CREATE, 0666) -} - -// Close unlocks and closes the underlying file. -// -// Close may be called multiple times; all calls after the first will return a -// non-nil error. 
-func (f *File) Close() error { - if f.closed { - return &fs.PathError{ - Op: "close", - Path: f.Name(), - Err: fs.ErrClosed, - } - } - f.closed = true - - err := closeFile(f.osFile.File) - runtime.SetFinalizer(f, nil) - return err -} - -// Read opens the named file with a read-lock and returns its contents. -func Read(name string) ([]byte, error) { - f, err := Open(name) - if err != nil { - return nil, err - } - defer f.Close() - - return io.ReadAll(f) -} - -// Write opens the named file (creating it with the given permissions if needed), -// then write-locks it and overwrites it with the given content. -func Write(name string, content io.Reader, perm fs.FileMode) (err error) { - f, err := OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - - _, err = io.Copy(f, content) - if closeErr := f.Close(); err == nil { - err = closeErr - } - return err -} - -// Transform invokes t with the result of reading the named file, with its lock -// still held. -// -// If t returns a nil error, Transform then writes the returned contents back to -// the file, making a best effort to preserve existing contents on error. -// -// t must not modify the slice passed to it. -func Transform(name string, t func([]byte) ([]byte, error)) (err error) { - f, err := Edit(name) - if err != nil { - return err - } - defer f.Close() - - old, err := io.ReadAll(f) - if err != nil { - return err - } - - new, err := t(old) - if err != nil { - return err - } - - if len(new) > len(old) { - // The overall file size is increasing, so write the tail first: if we're - // about to run out of space on the disk, we would rather detect that - // failure before we have overwritten the original contents. - if _, err := f.WriteAt(new[len(old):], int64(len(old))); err != nil { - // Make a best effort to remove the incomplete tail. - f.Truncate(int64(len(old))) - return err - } - } - - // We're about to overwrite the old contents. In case of failure, make a best - // effort to roll back before we close the file. - defer func() { - if err != nil { - if _, err := f.WriteAt(old, 0); err == nil { - f.Truncate(int64(len(old))) - } - } - }() - - if len(new) >= len(old) { - if _, err := f.WriteAt(new[:len(old)], 0); err != nil { - return err - } - } else { - if _, err := f.WriteAt(new, 0); err != nil { - return err - } - // The overall file size is decreasing, so shrink the file to its final size - // after writing. We do this after writing (instead of before) so that if - // the write fails, enough filesystem space will likely still be reserved - // to contain the previous contents. - if err := f.Truncate(int64(len(new))); err != nil { - return err - } - } - - return nil -} diff --git a/internal/lockedfile/lockedfile_filelock.go b/internal/lockedfile/lockedfile_filelock.go deleted file mode 100644 index 7c71672c811..00000000000 --- a/internal/lockedfile/lockedfile_filelock.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !plan9 -// +build !plan9 - -package lockedfile - -import ( - "io/fs" - "os" - - "golang.org/x/tools/internal/lockedfile/internal/filelock" -) - -func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) { - // On BSD systems, we could add the O_SHLOCK or O_EXLOCK flag to the OpenFile - // call instead of locking separately, but we have to support separate locking - // calls for Linux and Windows anyway, so it's simpler to use that approach - // consistently. - - f, err := os.OpenFile(name, flag&^os.O_TRUNC, perm) - if err != nil { - return nil, err - } - - switch flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) { - case os.O_WRONLY, os.O_RDWR: - err = filelock.Lock(f) - default: - err = filelock.RLock(f) - } - if err != nil { - f.Close() - return nil, err - } - - if flag&os.O_TRUNC == os.O_TRUNC { - if err := f.Truncate(0); err != nil { - // The documentation for os.O_TRUNC says “if possible, truncate file when - // opened”, but doesn't define “possible” (golang.org/issue/28699). - // We'll treat regular files (and symlinks to regular files) as “possible” - // and ignore errors for the rest. - if fi, statErr := f.Stat(); statErr != nil || fi.Mode().IsRegular() { - filelock.Unlock(f) - f.Close() - return nil, err - } - } - } - - return f, nil -} - -func closeFile(f *os.File) error { - // Since locking syscalls operate on file descriptors, we must unlock the file - // while the descriptor is still valid — that is, before the file is closed — - // and avoid unlocking files that are already closed. - err := filelock.Unlock(f) - - if closeErr := f.Close(); err == nil { - err = closeErr - } - return err -} diff --git a/internal/lockedfile/lockedfile_plan9.go b/internal/lockedfile/lockedfile_plan9.go deleted file mode 100644 index 40871e610cd..00000000000 --- a/internal/lockedfile/lockedfile_plan9.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build plan9 -// +build plan9 - -package lockedfile - -import ( - "io/fs" - "math/rand" - "os" - "strings" - "time" -) - -// Opening an exclusive-use file returns an error. -// The expected error strings are: -// -// - "open/create -- file is locked" (cwfs, kfs) -// - "exclusive lock" (fossil) -// - "exclusive use file already open" (ramfs) -var lockedErrStrings = [...]string{ - "file is locked", - "exclusive lock", - "exclusive use file already open", -} - -// Even though plan9 doesn't support the Lock/RLock/Unlock functions to -// manipulate already-open files, IsLocked is still meaningful: os.OpenFile -// itself may return errors that indicate that a file with the ModeExclusive bit -// set is already open. -func isLocked(err error) bool { - s := err.Error() - - for _, frag := range lockedErrStrings { - if strings.Contains(s, frag) { - return true - } - } - - return false -} - -func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) { - // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls. - // - // Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open - // for I/O by only one fid at a time across all clients of the server. If a - // second open is attempted, it draws an error.” - // - // So we can try to open a locked file, but if it fails we're on our own to - // figure out when it becomes available. We'll use exponential backoff with - // some jitter and an arbitrary limit of 500ms. 
- - // If the file was unpacked or created by some other program, it might not - // have the ModeExclusive bit set. Set it before we call OpenFile, so that we - // can be confident that a successful OpenFile implies exclusive use. - if fi, err := os.Stat(name); err == nil { - if fi.Mode()&fs.ModeExclusive == 0 { - if err := os.Chmod(name, fi.Mode()|fs.ModeExclusive); err != nil { - return nil, err - } - } - } else if !os.IsNotExist(err) { - return nil, err - } - - nextSleep := 1 * time.Millisecond - const maxSleep = 500 * time.Millisecond - for { - f, err := os.OpenFile(name, flag, perm|fs.ModeExclusive) - if err == nil { - return f, nil - } - - if !isLocked(err) { - return nil, err - } - - time.Sleep(nextSleep) - - nextSleep += nextSleep - if nextSleep > maxSleep { - nextSleep = maxSleep - } - // Apply 10% jitter to avoid synchronizing collisions. - nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep)) - } -} - -func closeFile(f *os.File) error { - return f.Close() -} diff --git a/internal/lockedfile/lockedfile_test.go b/internal/lockedfile/lockedfile_test.go deleted file mode 100644 index edf885112f7..00000000000 --- a/internal/lockedfile/lockedfile_test.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || plan9 || windows -// +build unix aix darwin dragonfly freebsd linux netbsd openbsd solaris plan9 windows - -package lockedfile_test - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - "testing" - "time" - - "golang.org/x/tools/internal/lockedfile" -) - -func mustTempDir(t *testing.T) (dir string, remove func()) { - t.Helper() - - dir, err := os.MkdirTemp("", filepath.Base(t.Name())) - if err != nil { - t.Fatal(err) - } - return dir, func() { os.RemoveAll(dir) } -} - -const ( - quiescent = 10 * time.Millisecond - probablyStillBlocked = 10 * time.Second -) - -func mustBlock(t *testing.T, desc string, f func()) (wait func(*testing.T)) { - t.Helper() - - done := make(chan struct{}) - go func() { - f() - close(done) - }() - - select { - case <-done: - t.Fatalf("%s unexpectedly did not block", desc) - return nil - - case <-time.After(quiescent): - return func(t *testing.T) { - t.Helper() - select { - case <-time.After(probablyStillBlocked): - t.Fatalf("%s is unexpectedly still blocked after %v", desc, probablyStillBlocked) - case <-done: - } - } - } -} - -func TestMutexExcludes(t *testing.T) { - t.Parallel() - - dir, remove := mustTempDir(t) - defer remove() - - path := filepath.Join(dir, "lock") - - mu := lockedfile.MutexAt(path) - t.Logf("mu := MutexAt(_)") - - unlock, err := mu.Lock() - if err != nil { - t.Fatalf("mu.Lock: %v", err) - } - t.Logf("unlock, _ := mu.Lock()") - - mu2 := lockedfile.MutexAt(mu.Path) - t.Logf("mu2 := MutexAt(mu.Path)") - - wait := mustBlock(t, "mu2.Lock()", func() { - unlock2, err := mu2.Lock() - if err != nil { - t.Errorf("mu2.Lock: %v", err) - return - } - t.Logf("unlock2, _ := mu2.Lock()") - t.Logf("unlock2()") - unlock2() - }) - - t.Logf("unlock()") - unlock() - wait(t) -} - -func TestReadWaitsForLock(t *testing.T) { - t.Parallel() - - dir, remove := mustTempDir(t) - defer remove() - - path := filepath.Join(dir, "timestamp.txt") - - f, err := lockedfile.Create(path) - if err != nil { - t.Fatalf("Create: %v", err) - } - defer f.Close() - - const ( - part1 = "part 1\n" - part2 = "part 
2\n" - ) - _, err = f.WriteString(part1) - if err != nil { - t.Fatalf("WriteString: %v", err) - } - t.Logf("WriteString(%q) = ", part1) - - wait := mustBlock(t, "Read", func() { - b, err := lockedfile.Read(path) - if err != nil { - t.Errorf("Read: %v", err) - return - } - - const want = part1 + part2 - got := string(b) - if got == want { - t.Logf("Read(_) = %q", got) - } else { - t.Errorf("Read(_) = %q, _; want %q", got, want) - } - }) - - _, err = f.WriteString(part2) - if err != nil { - t.Errorf("WriteString: %v", err) - } else { - t.Logf("WriteString(%q) = ", part2) - } - f.Close() - - wait(t) -} - -func TestCanLockExistingFile(t *testing.T) { - t.Parallel() - - dir, remove := mustTempDir(t) - defer remove() - path := filepath.Join(dir, "existing.txt") - - if err := os.WriteFile(path, []byte("ok"), 0777); err != nil { - t.Fatalf("os.WriteFile: %v", err) - } - - f, err := lockedfile.Edit(path) - if err != nil { - t.Fatalf("first Edit: %v", err) - } - - wait := mustBlock(t, "Edit", func() { - other, err := lockedfile.Edit(path) - if err != nil { - t.Errorf("second Edit: %v", err) - } - other.Close() - }) - - f.Close() - wait(t) -} - -// TestSpuriousEDEADLK verifies that the spurious EDEADLK reported in -// https://golang.org/issue/32817 no longer occurs. -func TestSpuriousEDEADLK(t *testing.T) { - // P.1 locks file A. - // Q.3 locks file B. - // Q.3 blocks on file A. - // P.2 blocks on file B. (Spurious EDEADLK occurs here.) - // P.1 unlocks file A. - // Q.3 unblocks and locks file A. - // Q.3 unlocks files A and B. - // P.2 unblocks and locks file B. - // P.2 unlocks file B. - - dirVar := t.Name() + "DIR" - - if dir := os.Getenv(dirVar); dir != "" { - // Q.3 locks file B. - b, err := lockedfile.Edit(filepath.Join(dir, "B")) - if err != nil { - t.Fatal(err) - } - defer b.Close() - - if err := os.WriteFile(filepath.Join(dir, "locked"), []byte("ok"), 0666); err != nil { - t.Fatal(err) - } - - // Q.3 blocks on file A. - a, err := lockedfile.Edit(filepath.Join(dir, "A")) - // Q.3 unblocks and locks file A. - if err != nil { - t.Fatal(err) - } - defer a.Close() - - // Q.3 unlocks files A and B. - return - } - - dir, remove := mustTempDir(t) - defer remove() - - // P.1 locks file A. - a, err := lockedfile.Edit(filepath.Join(dir, "A")) - if err != nil { - t.Fatal(err) - } - - cmd := exec.Command(os.Args[0], "-test.run="+t.Name()) - cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir)) - - qDone := make(chan struct{}) - waitQ := mustBlock(t, "Edit A and B in subprocess", func() { - out, err := cmd.CombinedOutput() - if err != nil { - t.Errorf("%v:\n%s", err, out) - } - close(qDone) - }) - - // Wait until process Q has either failed or locked file B. - // Otherwise, P.2 might not block on file B as intended. -locked: - for { - if _, err := os.Stat(filepath.Join(dir, "locked")); !os.IsNotExist(err) { - break locked - } - select { - case <-qDone: - break locked - case <-time.After(1 * time.Millisecond): - } - } - - waitP2 := mustBlock(t, "Edit B", func() { - // P.2 blocks on file B. (Spurious EDEADLK occurs here.) - b, err := lockedfile.Edit(filepath.Join(dir, "B")) - // P.2 unblocks and locks file B. - if err != nil { - t.Error(err) - return - } - // P.2 unlocks file B. - b.Close() - }) - - // P.1 unlocks file A. - a.Close() - - waitQ(t) - waitP2(t) -} diff --git a/internal/lockedfile/mutex.go b/internal/lockedfile/mutex.go deleted file mode 100644 index 180a36c6201..00000000000 --- a/internal/lockedfile/mutex.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lockedfile - -import ( - "fmt" - "os" - "sync" -) - -// A Mutex provides mutual exclusion within and across processes by locking a -// well-known file. Such a file generally guards some other part of the -// filesystem: for example, a Mutex file in a directory might guard access to -// the entire tree rooted in that directory. -// -// Mutex does not implement sync.Locker: unlike a sync.Mutex, a lockedfile.Mutex -// can fail to lock (e.g. if there is a permission error in the filesystem). -// -// Like a sync.Mutex, a Mutex may be included as a field of a larger struct but -// must not be copied after first use. The Path field must be set before first -// use and must not be change thereafter. -type Mutex struct { - Path string // The path to the well-known lock file. Must be non-empty. - mu sync.Mutex // A redundant mutex. The race detector doesn't know about file locking, so in tests we may need to lock something that it understands. -} - -// MutexAt returns a new Mutex with Path set to the given non-empty path. -func MutexAt(path string) *Mutex { - if path == "" { - panic("lockedfile.MutexAt: path must be non-empty") - } - return &Mutex{Path: path} -} - -func (mu *Mutex) String() string { - return fmt.Sprintf("lockedfile.Mutex(%s)", mu.Path) -} - -// Lock attempts to lock the Mutex. -// -// If successful, Lock returns a non-nil unlock function: it is provided as a -// return-value instead of a separate method to remind the caller to check the -// accompanying error. (See https://golang.org/issue/20803.) -func (mu *Mutex) Lock() (unlock func(), err error) { - if mu.Path == "" { - panic("lockedfile.Mutex: missing Path during Lock") - } - - // We could use either O_RDWR or O_WRONLY here. If we choose O_RDWR and the - // file at mu.Path is write-only, the call to OpenFile will fail with a - // permission error. That's actually what we want: if we add an RLock method - // in the future, it should call OpenFile with O_RDONLY and will require the - // files must be readable, so we should not let the caller make any - // assumptions about Mutex working with write-only files. - f, err := OpenFile(mu.Path, os.O_RDWR|os.O_CREATE, 0666) - if err != nil { - return nil, err - } - mu.mu.Lock() - - return func() { - mu.mu.Unlock() - f.Close() - }, nil -} diff --git a/internal/lockedfile/transform_test.go b/internal/lockedfile/transform_test.go deleted file mode 100644 index 2c0cd533aa9..00000000000 --- a/internal/lockedfile/transform_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || plan9 || windows -// +build unix aix darwin dragonfly freebsd linux netbsd openbsd solaris plan9 windows - -package lockedfile_test - -import ( - "bytes" - "encoding/binary" - "math/rand" - "path/filepath" - "testing" - "time" - - "golang.org/x/tools/internal/lockedfile" -) - -func isPowerOf2(x int) bool { - return x > 0 && x&(x-1) == 0 -} - -func roundDownToPowerOf2(x int) int { - if x <= 0 { - panic("nonpositive x") - } - bit := 1 - for x != bit { - x = x &^ bit - bit <<= 1 - } - return x -} - -func TestTransform(t *testing.T) { - dir, remove := mustTempDir(t) - defer remove() - path := filepath.Join(dir, "blob.bin") - - const maxChunkWords = 8 << 10 - buf := make([]byte, 2*maxChunkWords*8) - for i := uint64(0); i < 2*maxChunkWords; i++ { - binary.LittleEndian.PutUint64(buf[i*8:], i) - } - if err := lockedfile.Write(path, bytes.NewReader(buf[:8]), 0666); err != nil { - t.Fatal(err) - } - - var attempts int64 = 128 - if !testing.Short() { - attempts *= 16 - } - const parallel = 32 - - var sem = make(chan bool, parallel) - - for n := attempts; n > 0; n-- { - sem <- true - go func() { - defer func() { <-sem }() - - time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond) - chunkWords := roundDownToPowerOf2(rand.Intn(maxChunkWords) + 1) - offset := rand.Intn(chunkWords) - - err := lockedfile.Transform(path, func(data []byte) (chunk []byte, err error) { - chunk = buf[offset*8 : (offset+chunkWords)*8] - - if len(data)&^7 != len(data) { - t.Errorf("read %d bytes, but each write is an integer multiple of 8 bytes", len(data)) - return chunk, nil - } - - words := len(data) / 8 - if !isPowerOf2(words) { - t.Errorf("read %d 8-byte words, but each write is a power-of-2 number of words", words) - return chunk, nil - } - - u := binary.LittleEndian.Uint64(data) - for i := 1; i < words; i++ { - next := binary.LittleEndian.Uint64(data[i*8:]) - if next != u+1 { - t.Errorf("wrote sequential integers, but read integer out of sequence at offset %d", i) - return chunk, nil - } - u = next - } - - return chunk, nil - }) - - if err != nil { - t.Errorf("unexpected error from Transform: %v", err) - } - }() - } - - for n := parallel; n > 0; n-- { - sem <- true - } -} From 3df69b827aad452b5f545c4c1ed1733e0767829d Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 18 May 2023 18:08:39 -0400 Subject: [PATCH 042/109] gopls/internal/lsp/debug: remove memory monitoring I noticed in passing that the withNames parameter no longer had any effect since the relevant logic was deleted in CL 466975. (https://go-review.googlesource.com/c/tools/+/466975/48/gopls/internal/lsp/debug/serve.go#b548). The whole memory monitoring feature is redundant wrt the pprof endpoints, and was never used. This change deletes it. 
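The same data remains available on demand from the debug server, assuming
the standard net/http/pprof handlers are mounted on the -debug address. A
minimal sketch (the localhost:6060 address and the output file names are
illustrative assumptions, not part of this change):

    package main

    import (
        "io"
        "log"
        "net/http"
        "os"
    )

    // fetch downloads one pprof endpoint to a local file.
    func fetch(url, filename string) error {
        resp, err := http.Get(url)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        f, err := os.Create(filename)
        if err != nil {
            return err
        }
        defer f.Close()
        _, err = io.Copy(f, resp.Body)
        return err
    }

    func main() {
        // The address and file names are assumptions for illustration only.
        for url, file := range map[string]string{
            "http://localhost:6060/debug/pprof/heap":              "heap.pb.gz",
            "http://localhost:6060/debug/pprof/goroutine?debug=1": "goroutines.txt",
        } {
            if err := fetch(url, file); err != nil {
                log.Fatal(err)
            }
        }
    }

This produces roughly the heap and goroutine dumps that the deleted helper
wrote to the temp directory, but only when explicitly requested.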
Change-Id: Id8a084b70b6f49d6c95e563b167f99afc68cff9b Reviewed-on: https://go-review.googlesource.com/c/tools/+/496191 Run-TryBot: Alan Donovan gopls-CI: kokoro Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Robert Findley --- gopls/internal/lsp/cmd/serve.go | 1 - gopls/internal/lsp/debug/serve.go | 61 ------------------------------- 2 files changed, 62 deletions(-) diff --git a/gopls/internal/lsp/cmd/serve.go b/gopls/internal/lsp/cmd/serve.go index df42e7983c1..03cc187c3fa 100644 --- a/gopls/internal/lsp/cmd/serve.go +++ b/gopls/internal/lsp/cmd/serve.go @@ -90,7 +90,6 @@ func (s *Serve) Run(ctx context.Context, args ...string) error { } defer closeLog() di.ServerAddress = s.Address - di.MonitorMemory(ctx) di.Serve(ctx, s.Debug) } var ss jsonrpc2.StreamServer diff --git a/gopls/internal/lsp/debug/serve.go b/gopls/internal/lsp/debug/serve.go index 3c17dadff3c..f36a2385739 100644 --- a/gopls/internal/lsp/debug/serve.go +++ b/gopls/internal/lsp/debug/serve.go @@ -5,7 +5,6 @@ package debug import ( - "archive/zip" "bytes" "context" "errors" @@ -20,7 +19,6 @@ import ( "path" "path/filepath" "runtime" - rpprof "runtime/pprof" "strconv" "strings" "sync" @@ -494,65 +492,6 @@ func (i *Instance) ListenedDebugAddress() string { return i.listenedDebugAddress } -// MonitorMemory starts recording memory statistics each second. -func (i *Instance) MonitorMemory(ctx context.Context) { - tick := time.NewTicker(time.Second) - nextThresholdGiB := uint64(1) - go func() { - for { - <-tick.C - var mem runtime.MemStats - runtime.ReadMemStats(&mem) - if mem.HeapAlloc < nextThresholdGiB*1<<30 { - continue - } - if err := i.writeMemoryDebug(nextThresholdGiB, true); err != nil { - event.Error(ctx, "writing memory debug info", err) - } - if err := i.writeMemoryDebug(nextThresholdGiB, false); err != nil { - event.Error(ctx, "writing memory debug info", err) - } - event.Log(ctx, fmt.Sprintf("Wrote memory usage debug info to %v", os.TempDir())) - nextThresholdGiB++ - } - }() -} - -func (i *Instance) writeMemoryDebug(threshold uint64, withNames bool) error { - suffix := "withnames" - if !withNames { - suffix = "nonames" - } - - filename := fmt.Sprintf("gopls.%d-%dGiB-%s.zip", os.Getpid(), threshold, suffix) - zipf, err := os.OpenFile(filepath.Join(os.TempDir(), filename), os.O_CREATE|os.O_RDWR, 0644) - if err != nil { - return err - } - zipw := zip.NewWriter(zipf) - - f, err := zipw.Create("heap.pb.gz") - if err != nil { - return err - } - if err := rpprof.Lookup("heap").WriteTo(f, 0); err != nil { - return err - } - - f, err = zipw.Create("goroutines.txt") - if err != nil { - return err - } - if err := rpprof.Lookup("goroutine").WriteTo(f, 1); err != nil { - return err - } - - if err := zipw.Close(); err != nil { - return err - } - return zipf.Close() -} - func makeGlobalExporter(stderr io.Writer) event.Exporter { p := export.Printer{} var pMu sync.Mutex From e46df400eba350b6e459911505fb74021c718bd1 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 19 May 2023 11:03:19 -0400 Subject: [PATCH 043/109] gopls/internal/lsp/filecache: delayed tweaks from code review These were supposed to be part of CL 495800 but I failed to notice that 'git codereview mail' failed due to my failure to refresh my SSO certs. Sorry for the fumble. 
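One of the doc tweaks below clarifies SetBudget's query form: a negative
argument reads the current soft limit without changing it. A usage sketch
(filecache is internal to gopls, so this only compiles from within the
gopls module, and the 512MiB figure is an arbitrary illustrative value):

    package main

    import "golang.org/x/tools/gopls/internal/lsp/filecache"

    func main() {
        // Query the current soft limit (in bytes) without changing it.
        current := filecache.SetBudget(-1)
        _ = current

        // Lower the limit to 512MiB (an arbitrary example, not a recommendation).
        filecache.SetBudget(512 << 20)
    }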
Change-Id: I8e08d2624cc365defc6f5848e9178267f313917d Reviewed-on: https://go-review.googlesource.com/c/tools/+/496436 Run-TryBot: Alan Donovan Reviewed-by: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot --- gopls/internal/lsp/filecache/filecache.go | 35 ++++++++++++++++------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go index bc076aa2ec1..6441834090f 100644 --- a/gopls/internal/lsp/filecache/filecache.go +++ b/gopls/internal/lsp/filecache/filecache.go @@ -164,7 +164,7 @@ func Set(kind string, key [32]byte, value []byte) error { } // Avoiding O_TRUNC here is merely an optimization to avoid // cache misses when two threads race to write the same file. - if err := writeFileNoTrunc(casName, value, 0666); err != nil { + if err := writeFileNoTrunc(casName, value, 0600); err != nil { os.Remove(casName) // ignore error return err // e.g. disk full } @@ -178,7 +178,7 @@ func Set(kind string, key [32]byte, value []byte) error { if err := os.MkdirAll(filepath.Dir(indexName), 0700); err != nil { return err } - if err := writeFileNoTrunc(indexName, hash[:], 0666); err != nil { + if err := writeFileNoTrunc(indexName, hash[:], 0600); err != nil { os.Remove(indexName) // ignore error return err // e.g. disk full } @@ -203,19 +203,23 @@ func writeFileNoTrunc(filename string, data []byte, perm os.FileMode) error { return err } -const casKind = "cas" +const casKind = "cas" // kind for CAS (content-addressable store) files var iolimit = make(chan struct{}, 128) // counting semaphore to limit I/O concurrency in Set. var budget int64 = 1e9 // 1GB -// SetBudget sets a soft limit on disk usage of the cache (in bytes) -// and returns the previous value. Supplying a negative value queries -// the current value without changing it. +// SetBudget sets a soft limit on disk usage of files in the cache (in +// bytes) and returns the previous value. Supplying a negative value +// queries the current value without changing it. // // If two gopls processes have different budgets, the one with the // lower budget will collect garbage more actively, but both will // observe the effect. +// +// Even in the steady state, the storage usage reported by the 'du' +// command may exceed the budget by as much as 50-70% due to the +// overheads of directories and the effects of block quantization. func SetBudget(new int64) (old int64) { if new < 0 { return atomic.LoadInt64(&budget) @@ -250,6 +254,15 @@ func SetBudget(new int64) (old int64) { // practice atomic (all or nothing) on all platforms. // (See GOROOT/src/cmd/go/internal/cache/cache.go.) // +// Russ Cox notes: "all file systems use an rwlock around every file +// system block, including data blocks, so any writes or reads within +// the same block are going to be handled atomically by the FS +// implementation without any need to request file locking explicitly. +// And since the files are so small, there's only one block. (A block +// is at minimum 512 bytes, usually much more.)" And: "all modern file +// systems protect against [partial writes due to power loss] with +// journals." +// // We use a two-level scheme consisting of an index and a // content-addressable store (CAS). A single cache entry consists of // two files. 
The value of a cache entry is written into the file at @@ -262,10 +275,12 @@ func SetBudget(new int64) (old int64) { // Once the CAS file has been written, we write a small fixed-size // index file at filename(kind, key), using the values supplied by the // caller. The index file contains the hash that identifies the value -// file in the CAS. (We could add a small amount of extra metadata to -// this file if later desired.) Because the index file is small, +// file in the CAS. (We could add extra metadata to this file, up to +// 512B, the minimum size of a disk block, if later desired, so long +// as the total size remains fixed.) Because the index file is small, // concurrent writes to it are atomic in practice, even though this is -// not guaranteed by any OS. +// not guaranteed by any OS. The fixed size ensures that readers can't +// see a palimpsest when a short new file overwrites a longer old one. // // New versions of gopls are free to reorganize the contents of the // version directory as needs evolve. But all versions of gopls must @@ -275,7 +290,7 @@ func SetBudget(new int64) (old int64) { // the entire gopls directory so that newer binaries can clean up // after older ones: in the development cycle especially, new // new versions may be created frequently. - +// // TODO(adonovan): opt: use "VVVVVVVV / KK / KKKK...KKKK-kind" to // avoid creating 256 directories per distinct kind (+ cas). func filename(kind string, key [32]byte) (string, error) { From 4d66324eeed2eced1cb958b155686bbf172c0cbe Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 19 May 2023 14:51:17 -0400 Subject: [PATCH 044/109] gopls/internal/lsp/cache: tweak error message Change-Id: I0dacb32e76b0e657d1e0893f71da8f1ce38b7d50 Reviewed-on: https://go-review.googlesource.com/c/tools/+/496441 TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan Reviewed-by: Robert Findley gopls-CI: kokoro Auto-Submit: Alan Donovan --- gopls/internal/lsp/cache/snapshot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index 64e1b55bbcf..7567589a82e 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -1833,7 +1833,7 @@ searchOverlays: fix = `To work with multiple modules simultaneously, please upgrade to Go 1.18 or later, reinstall gopls, and use a go.work file.` } - msg = fmt.Sprintf(`This file is in directory %q, which is not included in your workspace. + msg = fmt.Sprintf(`This file is within module %q, which is not included in your workspace. %s See the documentation for more information on setting up your workspace: https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`, modDir, fix) From b742cb9a5ed7bcd56636d0ebe4f3afe7bab0c628 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Fri, 19 May 2023 10:22:58 -0400 Subject: [PATCH 045/109] gopls/internal/regtest/bench: add a benchmark for diagnosing saves As we discovered while investigating golang/go#60089, mod tidy operations can significantly affect the amount of time it takes gopls to diagnose a saved file. Add a benchmark for this operation. For reference, this new benchmark takes 8s+ on google-cloud-go, vs 300ms for DiagnoseChange (without the save). 
Updates golang/go#60089 Change-Id: Ie88bd63dd7d205b8629173e7f84aa1aa9858016b Reviewed-on: https://go-review.googlesource.com/c/tools/+/496435 TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Robert Findley Reviewed-by: Alan Donovan --- .../internal/regtest/bench/didchange_test.go | 100 +++++++++++------- 1 file changed, 61 insertions(+), 39 deletions(-) diff --git a/gopls/internal/regtest/bench/didchange_test.go b/gopls/internal/regtest/bench/didchange_test.go index 6bde10e1452..2030f325728 100644 --- a/gopls/internal/regtest/bench/didchange_test.go +++ b/gopls/internal/regtest/bench/didchange_test.go @@ -19,11 +19,13 @@ import ( // shared file cache. var editID int64 = time.Now().UnixNano() -var didChangeTests = []struct { +type changeTest struct { repo string file string -}{ - {"google-cloud-go", "httpreplay/httpreplay.go"}, +} + +var didChangeTests = []changeTest{ + {"google-cloud-go", "internal/annotate.go"}, {"istio", "pkg/fuzz/util.go"}, {"kubernetes", "pkg/controller/lookup_cache.go"}, {"kuma", "api/generic/insights.go"}, @@ -64,43 +66,63 @@ func BenchmarkDidChange(b *testing.B) { func BenchmarkDiagnoseChange(b *testing.B) { for _, test := range didChangeTests { - b.Run(test.repo, func(b *testing.B) { - sharedEnv := getRepo(b, test.repo).sharedEnv(b) - config := fake.EditorConfig{ - Env: map[string]string{ - "GOPATH": sharedEnv.Sandbox.GOPATH(), - }, - Settings: map[string]interface{}{ - "diagnosticsDelay": "0s", - }, - } - // Use a new env to avoid the diagnostic delay: we want to measure how - // long it takes to produce the diagnostics. - env := getRepo(b, test.repo).newEnv(b, "diagnoseChange", config) - defer env.Close() - env.OpenFile(test.file) - // Insert the text we'll be modifying at the top of the file. - env.EditBuffer(test.file, protocol.TextEdit{NewText: "// __REGTEST_PLACEHOLDER_0__\n"}) - env.AfterChange() - b.ResetTimer() + runChangeDiagnosticsBenchmark(b, test, false) + } +} + +// TODO(rfindley): add a benchmark for with a metadata-affecting change, when +// this matters. +func BenchmarkDiagnoseSave(b *testing.B) { + for _, test := range didChangeTests { + runChangeDiagnosticsBenchmark(b, test, true) + } +} + +// runChangeDiagnosticsBenchmark runs a benchmark to edit the test file and +// await the resulting diagnostics pass. If save is set, the file is also saved. +func runChangeDiagnosticsBenchmark(b *testing.B, test changeTest, save bool) { + b.Run(test.repo, func(b *testing.B) { + sharedEnv := getRepo(b, test.repo).sharedEnv(b) + config := fake.EditorConfig{ + Env: map[string]string{ + "GOPATH": sharedEnv.Sandbox.GOPATH(), + }, + Settings: map[string]interface{}{ + "diagnosticsDelay": "0s", + }, + } + // Use a new env to avoid the diagnostic delay: we want to measure how + // long it takes to produce the diagnostics. + env := getRepo(b, test.repo).newEnv(b, "diagnoseSave", config) + defer env.Close() + env.OpenFile(test.file) + // Insert the text we'll be modifying at the top of the file. + env.EditBuffer(test.file, protocol.TextEdit{NewText: "// __REGTEST_PLACEHOLDER_0__\n"}) + if save { + env.SaveBuffer(test.file) + } + env.AfterChange() + b.ResetTimer() - // We must use an extra subtest layer here, so that we only set up the - // shared env once (otherwise we pay additional overhead and the profiling - // flags don't work). 
- b.Run("diagnose", func(b *testing.B) { - for i := 0; i < b.N; i++ { - edits := atomic.AddInt64(&editID, 1) - env.EditBuffer(test.file, protocol.TextEdit{ - Range: protocol.Range{ - Start: protocol.Position{Line: 0, Character: 0}, - End: protocol.Position{Line: 1, Character: 0}, - }, - // Increment the placeholder text, to ensure cache misses. - NewText: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", edits), - }) - env.AfterChange() + // We must use an extra subtest layer here, so that we only set up the + // shared env once (otherwise we pay additional overhead and the profiling + // flags don't work). + b.Run("diagnose", func(b *testing.B) { + for i := 0; i < b.N; i++ { + edits := atomic.AddInt64(&editID, 1) + env.EditBuffer(test.file, protocol.TextEdit{ + Range: protocol.Range{ + Start: protocol.Position{Line: 0, Character: 0}, + End: protocol.Position{Line: 1, Character: 0}, + }, + // Increment the placeholder text, to ensure cache misses. + NewText: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", edits), + }) + if save { + env.SaveBuffer(test.file) } - }) + env.AfterChange() + } }) - } + }) } From 2eb726b887f4aee9069badb5ec21be5b62634ce5 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 19 May 2023 13:23:06 -0400 Subject: [PATCH 046/109] gopls/internal/lsp/filecache: touch only files older than 1h This change implements an optimization similar to one in the go command's cache: we skip the chtimes(2) call to update the file access times unless it would change by at least one hour. This avoids turning every read into a write. It makes quite a difference to the Get benchmark: 330MB/s before, 430MB/s after. Change-Id: I5b5b6e99a1968c73bf1032b410bec989961c4a90 Reviewed-on: https://go-review.googlesource.com/c/tools/+/496438 Reviewed-by: Robert Findley Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro --- gopls/internal/lsp/filecache/filecache.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go index 6441834090f..6f04aa86b40 100644 --- a/gopls/internal/lsp/filecache/filecache.go +++ b/gopls/internal/lsp/filecache/filecache.go @@ -110,24 +110,24 @@ func Get(kind string, key [32]byte) ([]byte, error) { // Update file times used by LRU eviction. // - // This turns every read into a write operation. - // If this is a performance problem, we should - // touch the files asynchronously, or, follow - // the approach used in the go command's cache - // and update only if the existing timestamp is - // older than, say, one hour. + // Because this turns a read into a write operation, + // we follow the approach used in the go command's + // cache and update the access time only if the + // existing timestamp is older than one hour. // // (Traditionally the access time would be updated // automatically, but for efficiency most POSIX systems have // for many years set the noatime mount option to avoid every // open or read operation entailing a metadata write.) 
now := time.Now() - if err := os.Chtimes(indexName, now, now); err != nil { - return nil, fmt.Errorf("failed to update access time of index file: %w", err) - } - if err := os.Chtimes(casName, now, now); err != nil { - return nil, fmt.Errorf("failed to update access time of CAS file: %w", err) + touch := func(filename string) { + st, err := os.Stat(filename) + if err == nil && now.Sub(st.ModTime()) > time.Hour { + os.Chtimes(filename, now, now) // ignore error + } } + touch(indexName) + touch(casName) if useMemCache { memCache.Set(memKey{kind, key}, value, len(value)) From d7f4359f81412d70847468c507b451c8618a08b4 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Fri, 19 May 2023 10:50:57 -0400 Subject: [PATCH 047/109] gopls/internal/lsp/mod: optimizations for mod tidy diagnostics Run mod tidy diagnostics in parallel, and don't parse files to determine missing imports if there are no missing requires. BenchmarkDiagnoseSave: 8s->1.8s For golang/go#60089 Change-Id: I5d41827914e4eb9264b16ed14af323c017eb327c Reviewed-on: https://go-review.googlesource.com/c/tools/+/496439 TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Robert Findley Reviewed-by: Alan Donovan --- gopls/doc/commands.md | 3 ++ gopls/internal/lsp/cache/mod_tidy.go | 35 ++++++++++++++------ gopls/internal/lsp/command.go | 7 ++-- gopls/internal/lsp/command/interface.go | 5 ++- gopls/internal/lsp/mod/diagnostics.go | 43 +++++++++++++++++-------- gopls/internal/lsp/source/api_json.go | 2 +- 6 files changed, 66 insertions(+), 29 deletions(-) diff --git a/gopls/doc/commands.md b/gopls/doc/commands.md index 8fe677b259b..b259f630cf4 100644 --- a/gopls/doc/commands.md +++ b/gopls/doc/commands.md @@ -267,6 +267,9 @@ Args: "URI": string, // The module path to remove. "ModulePath": string, + // If the module is tidied apart from the one unused diagnostic, we can + // run `go get module@none`, and then run `go mod tidy`. Otherwise, we + // must make textual edits. "OnlyDiagnostic": bool, } ``` diff --git a/gopls/internal/lsp/cache/mod_tidy.go b/gopls/internal/lsp/cache/mod_tidy.go index b5e2deacdb1..8dd555dae33 100644 --- a/gopls/internal/lsp/cache/mod_tidy.go +++ b/gopls/internal/lsp/cache/mod_tidy.go @@ -183,7 +183,33 @@ func modTidyDiagnostics(ctx context.Context, snapshot *snapshot, pm *source.Pars // go.mod file. The fixes will be for the go.mod file, but the // diagnostics should also appear in both the go.mod file and the import // statements in the Go files in which the dependencies are used. + // Finally, add errors for any unused dependencies. + if len(missing) > 0 { + missingModuleDiagnostics, err := missingModuleDiagnostics(ctx, snapshot, pm, ideal, missing) + if err != nil { + return nil, err + } + diagnostics = append(diagnostics, missingModuleDiagnostics...) + } + + // Opt: if this is the only diagnostic, we can avoid textual edits and just + // run the Go command. + // + // See also the documentation for command.RemoveDependencyArgs.OnlyDiagnostic. 
+ onlyDiagnostic := len(diagnostics) == 0 && len(unused) == 1 + for _, req := range unused { + srcErr, err := unusedDiagnostic(pm.Mapper, req, onlyDiagnostic) + if err != nil { + return nil, err + } + diagnostics = append(diagnostics, srcErr) + } + return diagnostics, nil +} + +func missingModuleDiagnostics(ctx context.Context, snapshot *snapshot, pm *source.ParsedModule, ideal *modfile.File, missing map[string]*modfile.Require) ([]*source.Diagnostic, error) { missingModuleFixes := map[*modfile.Require][]source.SuggestedFix{} + var diagnostics []*source.Diagnostic for _, req := range missing { srcDiag, err := missingModuleDiagnostic(pm, req) if err != nil { @@ -290,15 +316,6 @@ func modTidyDiagnostics(ctx context.Context, snapshot *snapshot, pm *source.Pars } } } - // Finally, add errors for any unused dependencies. - onlyDiagnostic := len(diagnostics) == 0 && len(unused) == 1 - for _, req := range unused { - srcErr, err := unusedDiagnostic(pm.Mapper, req, onlyDiagnostic) - if err != nil { - return nil, err - } - diagnostics = append(diagnostics, srcErr) - } return diagnostics, nil } diff --git a/gopls/internal/lsp/command.go b/gopls/internal/lsp/command.go index 7236087ddbd..7bbadc158d7 100644 --- a/gopls/internal/lsp/command.go +++ b/gopls/internal/lsp/command.go @@ -360,10 +360,9 @@ func (c *commandHandler) RemoveDependency(ctx context.Context, args command.Remo progress: "Removing dependency", forURI: args.URI, }, func(ctx context.Context, deps commandDeps) error { - // If the module is tidied apart from the one unused diagnostic, we can - // run `go get module@none`, and then run `go mod tidy`. Otherwise, we - // must make textual edits. - // TODO(rstambler): In Go 1.17+, we will be able to use the go command + // See the documentation for OnlyDiagnostic. + // + // TODO(rfindley): In Go 1.17+, we will be able to use the go command // without checking if the module is tidy. if args.OnlyDiagnostic { return c.s.runGoModUpdateCommands(ctx, deps.snapshot, args.URI.SpanURI(), func(invoke func(...string) (*bytes.Buffer, error)) error { diff --git a/gopls/internal/lsp/command/interface.go b/gopls/internal/lsp/command/interface.go index 1342e843810..ababac60cab 100644 --- a/gopls/internal/lsp/command/interface.go +++ b/gopls/internal/lsp/command/interface.go @@ -236,7 +236,10 @@ type RemoveDependencyArgs struct { // The go.mod file URI. URI protocol.DocumentURI // The module path to remove. - ModulePath string + ModulePath string + // If the module is tidied apart from the one unused diagnostic, we can + // run `go get module@none`, and then run `go mod tidy`. Otherwise, we + // must make textual edits. 
OnlyDiagnostic bool } diff --git a/gopls/internal/lsp/mod/diagnostics.go b/gopls/internal/lsp/mod/diagnostics.go index af5fe3829c3..cd1c85b2613 100644 --- a/gopls/internal/lsp/mod/diagnostics.go +++ b/gopls/internal/lsp/mod/diagnostics.go @@ -9,11 +9,14 @@ package mod import ( "context" "fmt" + "runtime" "sort" "strings" + "sync" "golang.org/x/mod/modfile" "golang.org/x/mod/semver" + "golang.org/x/sync/errgroup" "golang.org/x/tools/gopls/internal/govulncheck" "golang.org/x/tools/gopls/internal/lsp/command" "golang.org/x/tools/gopls/internal/lsp/protocol" @@ -58,24 +61,36 @@ func VulnerabilityDiagnostics(ctx context.Context, snapshot source.Snapshot) (ma } func collectDiagnostics(ctx context.Context, snapshot source.Snapshot, diagFn func(context.Context, source.Snapshot, source.FileHandle) ([]*source.Diagnostic, error)) (map[span.URI][]*source.Diagnostic, error) { + + g, ctx := errgroup.WithContext(ctx) + cpulimit := runtime.GOMAXPROCS(0) + g.SetLimit(cpulimit) + + var mu sync.Mutex reports := make(map[span.URI][]*source.Diagnostic) + for _, uri := range snapshot.ModFiles() { - fh, err := snapshot.ReadFile(ctx, uri) - if err != nil { - return nil, err - } - reports[fh.URI()] = []*source.Diagnostic{} - diagnostics, err := diagFn(ctx, snapshot, fh) - if err != nil { - return nil, err - } - for _, d := range diagnostics { - fh, err := snapshot.ReadFile(ctx, d.URI) + uri := uri + g.Go(func() error { + fh, err := snapshot.ReadFile(ctx, uri) if err != nil { - return nil, err + return err } - reports[fh.URI()] = append(reports[fh.URI()], d) - } + diagnostics, err := diagFn(ctx, snapshot, fh) + if err != nil { + return err + } + for _, d := range diagnostics { + mu.Lock() + reports[d.URI] = append(reports[fh.URI()], d) + mu.Unlock() + } + return nil + }) + } + + if err := g.Wait(); err != nil { + return nil, err } return reports, nil } diff --git a/gopls/internal/lsp/source/api_json.go b/gopls/internal/lsp/source/api_json.go index 281772b889a..f777fdbd764 100644 --- a/gopls/internal/lsp/source/api_json.go +++ b/gopls/internal/lsp/source/api_json.go @@ -760,7 +760,7 @@ var GeneratedAPIJSON = &APIJSON{ Command: "gopls.remove_dependency", Title: "Remove a dependency", Doc: "Removes a dependency from the go.mod file of a module.", - ArgDoc: "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// The module path to remove.\n\t\"ModulePath\": string,\n\t\"OnlyDiagnostic\": bool,\n}", + ArgDoc: "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// The module path to remove.\n\t\"ModulePath\": string,\n\t// If the module is tidied apart from the one unused diagnostic, we can\n\t// run `go get module@none`, and then run `go mod tidy`. Otherwise, we\n\t// must make textual edits.\n\t\"OnlyDiagnostic\": bool,\n}", }, { Command: "gopls.reset_go_mod_diagnostics", From a5ef6c3eb9847c2320bda0bfd97e52c334a6ed0a Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Fri, 19 May 2023 14:43:24 -0400 Subject: [PATCH 048/109] gopls/internal/lsp: keep track of overlays on the files map The overlays method showed up as a hot spot in the google-cloud-go repo (via reloadOrphanedFiles), because it walks all files to find the small number of overlays. We already have a filesMap abstraction; use it to keep track of overlays in a separate map. 
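In a minimal sketch (hypothetical names, not the actual filesMap or
persistent.Map code), the idea is to maintain the small subset in its own
map and keep it in sync on every write, so collecting overlays costs
O(#overlays) rather than O(#files):

    package filemap

    // fileHandle and overlay are placeholders standing in for
    // source.FileHandle and *cache.Overlay; they exist only to
    // illustrate the shape of the change.
    type fileHandle interface{ URI() string }

    type overlay struct{ uri string }

    func (o *overlay) URI() string { return o.uri }

    type fileMap struct {
        all      map[string]fileHandle // every known file
        overlays map[string]*overlay   // just the overlays, kept in sync with all
    }

    func (m *fileMap) set(uri string, fh fileHandle) {
        m.all[uri] = fh
        if o, ok := fh.(*overlay); ok {
            m.overlays[uri] = o
        } else {
            // A non-overlay write must clear any stale overlay entry.
            delete(m.overlays, uri)
        }
    }

    func (m *fileMap) delete(uri string) {
        delete(m.all, uri)
        delete(m.overlays, uri)
    }

    // getOverlays no longer scans every file; it is O(#overlays).
    func (m *fileMap) getOverlays() []*overlay {
        out := make([]*overlay, 0, len(m.overlays))
        for _, o := range m.overlays {
            out = append(out, o)
        }
        return out
    }

The real change below does the same on filesMap Set/Delete and copies the
overlay map alongside the persistent map in Clone.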
For golang/go#60089 Change-Id: I62c6c688d012beaa4b0f255225993da961cb9dad Reviewed-on: https://go-review.googlesource.com/c/tools/+/496442 Reviewed-by: Alan Donovan Run-TryBot: Robert Findley TryBot-Result: Gopher Robot gopls-CI: kokoro --- gopls/internal/lsp/cache/maps.go | 35 ++++++++++++++++++++++++---- gopls/internal/lsp/cache/snapshot.go | 12 +--------- 2 files changed, 32 insertions(+), 15 deletions(-) diff --git a/gopls/internal/lsp/cache/maps.go b/gopls/internal/lsp/cache/maps.go index 0ad4ac90f1c..533c3397b42 100644 --- a/gopls/internal/lsp/cache/maps.go +++ b/gopls/internal/lsp/cache/maps.go @@ -15,7 +15,8 @@ import ( // TODO(euroelessar): Use generics once support for go1.17 is dropped. type filesMap struct { - impl *persistent.Map + impl *persistent.Map + overlayMap map[span.URI]*Overlay // the subset that are overlays } // uriLessInterface is the < relation for "any" values containing span.URIs. @@ -25,13 +26,19 @@ func uriLessInterface(a, b interface{}) bool { func newFilesMap() filesMap { return filesMap{ - impl: persistent.NewMap(uriLessInterface), + impl: persistent.NewMap(uriLessInterface), + overlayMap: make(map[span.URI]*Overlay), } } func (m filesMap) Clone() filesMap { + overlays := make(map[span.URI]*Overlay, len(m.overlayMap)) + for k, v := range m.overlayMap { + overlays[k] = v + } return filesMap{ - impl: m.impl.Clone(), + impl: m.impl.Clone(), + overlayMap: overlays, } } @@ -55,10 +62,30 @@ func (m filesMap) Range(do func(key span.URI, value source.FileHandle)) { func (m filesMap) Set(key span.URI, value source.FileHandle) { m.impl.Set(key, value, nil) + + if o, ok := value.(*Overlay); ok { + m.overlayMap[key] = o + } else { + // Setting a non-overlay must delete the corresponding overlay, to preserve + // the accuracy of the overlay set. + delete(m.overlayMap, key) + } } -func (m filesMap) Delete(key span.URI) { +func (m *filesMap) Delete(key span.URI) { m.impl.Delete(key) + delete(m.overlayMap, key) +} + +// overlays returns a new unordered array of overlay files. +func (m filesMap) overlays() []*Overlay { + // In practice we will always have at least one overlay, so there is no need + // to optimize for the len=0 case by returning a nil slice. + overlays := make([]*Overlay, 0, len(m.overlayMap)) + for _, o := range m.overlayMap { + overlays = append(overlays, o) + } + return overlays } func packageIDLessInterface(x, y interface{}) bool { diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index 7567589a82e..bf22164ebcd 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -624,21 +624,11 @@ func (s *snapshot) buildOverlay() map[string][]byte { return overlays } -// TODO(rfindley): investigate whether it would be worthwhile to keep track of -// overlays when we get them via GetFile. func (s *snapshot) overlays() []*Overlay { s.mu.Lock() defer s.mu.Unlock() - var overlays []*Overlay - s.files.Range(func(uri span.URI, fh source.FileHandle) { - overlay, ok := fh.(*Overlay) - if !ok { - return - } - overlays = append(overlays, overlay) - }) - return overlays + return s.files.overlays() } // Package data kinds, identifying various package data that may be stored in From 5919673c90539aec54737a8df36480459c2fed5f Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 19 May 2023 12:15:22 -0400 Subject: [PATCH 049/109] internal/lsp/filecache: eliminate 'kind' directories This CL causes the kind component of each cache file to be represented as a suffix, and no longer a complete path segment. 
This avoids the creation of 7 directory trees (6 application kinds + cas) each containing 256 subdirectories. The proliferation of kinds was causing the storage requirements to increase well beyond (2.2x) the nominal budget, because the accounting for the latter ignores directories. This also reduces the number of directory lookups required for each file operation. Also, report the GOPLSCACHE environment variable and the computed executable-specific cache directory in the output of 'gopls stats'. Change-Id: Ibbebbf2bc10afd08b84444b8f71d0d110d5ae655 Reviewed-on: https://go-review.googlesource.com/c/tools/+/496437 Reviewed-by: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan --- gopls/internal/lsp/cmd/stats.go | 8 ++- gopls/internal/lsp/filecache/filecache.go | 69 +++++++++++++---------- 2 files changed, 46 insertions(+), 31 deletions(-) diff --git a/gopls/internal/lsp/cmd/stats.go b/gopls/internal/lsp/cmd/stats.go index 1b9df2f5cd7..f0d2f6db560 100644 --- a/gopls/internal/lsp/cmd/stats.go +++ b/gopls/internal/lsp/cmd/stats.go @@ -68,6 +68,7 @@ func (s *stats) Run(ctx context.Context, args ...string) error { stats := GoplsStats{ GOOS: runtime.GOOS, GOARCH: runtime.GOARCH, + GOPLSCACHE: os.Getenv("GOPLSCACHE"), GoVersion: runtime.Version(), GoplsVersion: debug.Version, } @@ -140,7 +141,9 @@ func (s *stats) Run(ctx context.Context, args ...string) error { // this executable and persisted in the cache. stats.BugReports = []string{} // non-nil for JSON do("Gathering bug reports", func() error { - for _, report := range filecache.BugReports() { + cacheDir, reports := filecache.BugReports() + stats.CacheDir = cacheDir + for _, report := range reports { stats.BugReports = append(stats.BugReports, string(report)) } return nil @@ -193,10 +196,11 @@ func (s *stats) Run(ctx context.Context, args ...string) error { } type GoplsStats struct { - GOOS, GOARCH string + GOOS, GOARCH, GOPLSCACHE string GoVersion string GoplsVersion string InitialWorkspaceLoadDuration string // in time.Duration string form + CacheDir string BugReports []string MemStats command.MemStatsResult WorkspaceStats command.WorkspaceStatsResult diff --git a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go index 6f04aa86b40..140ae97c3f5 100644 --- a/gopls/internal/lsp/filecache/filecache.go +++ b/gopls/internal/lsp/filecache/filecache.go @@ -32,6 +32,7 @@ import ( "os" "path/filepath" "sort" + "strings" "sync" "sync/atomic" "time" @@ -203,7 +204,11 @@ func writeFileNoTrunc(filename string, data []byte, perm os.FileMode) error { return err } -const casKind = "cas" // kind for CAS (content-addressable store) files +// reserved kind strings +const ( + casKind = "cas" // content-addressable store files + bugKind = "bug" // gopls bug reports +) var iolimit = make(chan struct{}, 128) // counting semaphore to limit I/O concurrency in Set. @@ -233,15 +238,19 @@ func SetBudget(new int64) (old int64) { // // A typical cache file has a name such as: // -// $HOME/Library/Caches / gopls / VVVVVVVV / kind / KK / KKKK...KKKK +// $HOME/Library/Caches / gopls / VVVVVVVV / KK / KKKK...KKKK - kind // // The portions separated by spaces are as follows: // - The user's preferred cache directory; the default value varies by OS. // - The constant "gopls". // - The "version", 32 bits of the digest of the gopls executable. -// - The kind or purpose of this cache subtree (e.g. "analysis"). // - The first 8 bits of the key, to avoid huge directories. // - The full 256 bits of the key. 
+// - The kind or purpose of this cache file (e.g. "analysis"). +// +// The kind establishes a namespace for the keys. It is represented as +// a suffix, not a segment, as this significantly reduces the number +// of directories created, and thus the storage overhead. // // Previous iterations of the design aimed for the invariant that once // a file is written, its contents are never modified, though it may @@ -290,16 +299,14 @@ func SetBudget(new int64) (old int64) { // the entire gopls directory so that newer binaries can clean up // after older ones: in the development cycle especially, new // new versions may be created frequently. -// -// TODO(adonovan): opt: use "VVVVVVVV / KK / KKKK...KKKK-kind" to -// avoid creating 256 directories per distinct kind (+ cas). func filename(kind string, key [32]byte) (string, error) { - hex := fmt.Sprintf("%x", key) + base := fmt.Sprintf("%x-%s", key, kind) dir, err := getCacheDir() if err != nil { return "", err } - return filepath.Join(dir, kind, hex[:2], hex), nil + // Keep the BugReports function consistent with this one. + return filepath.Join(dir, base[:2], base), nil } // getCacheDir returns the persistent cache directory of all processes @@ -526,8 +533,6 @@ func gc(goplsDir string) { } } -const bugKind = "bug" // reserved kind for gopls bug reports - func init() { // Register a handler to durably record this process's first // assertion failure in the cache so that we can ask users to @@ -544,29 +549,35 @@ func init() { // BugReports returns a new unordered array of the contents // of all cached bug reports produced by this executable. -func BugReports() [][]byte { +// It also returns the location of the cache directory +// used by this process (or "" on initialization error). +func BugReports() (string, [][]byte) { + // To test this logic, run: + // $ TEST_GOPLS_BUG=oops gopls stats # trigger a bug + // $ gopls stats # list the bugs + dir, err := getCacheDir() if err != nil { - return nil // ignore initialization errors + return "", nil // ignore initialization errors } var result [][]byte - _ = filepath.Walk(filepath.Join(dir, bugKind), - func(path string, info fs.FileInfo, err error) error { - if err != nil { - return nil // ignore readdir/stat errors + _ = filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return nil // ignore readdir/stat errors + } + // Parse the key from each "XXXX-bug" cache file name. + if !info.IsDir() && strings.HasSuffix(path, bugKind) { + var key [32]byte + n, err := hex.Decode(key[:], []byte(filepath.Base(path)[:len(key)*2])) + if err != nil || n != len(key) { + return nil // ignore malformed file names } - if !info.IsDir() { - var key [32]byte - n, err := hex.Decode(key[:], []byte(filepath.Base(path))) - if err != nil || n != len(key) { - return nil // ignore malformed file names - } - content, err := Get(bugKind, key) - if err == nil { // ignore read errors - result = append(result, content) - } + content, err := Get(bugKind, key) + if err == nil { // ignore read errors + result = append(result, content) } - return nil - }) - return result + } + return nil + }) + return dir, result } From 43b02eab0248f2a2502a59f6d107fb0395913a70 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Fri, 19 May 2023 16:18:30 -0400 Subject: [PATCH 050/109] gopls/internal/lsp/cache: only delete the most relevant mod tidy handle For workspaces with a lot of modules, deleting every mod tidy handle on every save is too expensive. 
Approximate the correct behavior by deleting only the most relevant
mod file. See the comments in the code for an explanation of why this
is an approximation, and why it is probably acceptable.

This decreases the DiagnoseSave benchmark for google-cloud-go to
550ms (from 1.8s).

For golang/go#60089

Change-Id: I94bea0b00b13468f73f921db789292cfa2b8d3e9
Reviewed-on: https://go-review.googlesource.com/c/tools/+/496595
TryBot-Result: Gopher Robot
Run-TryBot: Robert Findley
Reviewed-by: Alan Donovan
---
 gopls/internal/lsp/cache/snapshot.go | 59 ++++++++++++++++++++++++++--
 1 file changed, 55 insertions(+), 4 deletions(-)

diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go
index bf22164ebcd..e0a81130c53 100644
--- a/gopls/internal/lsp/cache/snapshot.go
+++ b/gopls/internal/lsp/cache/snapshot.go
@@ -2083,10 +2083,36 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
 		// Invalidate the previous modTidyHandle if any of the files have been
 		// saved or if any of the metadata has been invalidated.
 		if invalidateMetadata || fileWasSaved(originalFH, change.fileHandle) {
-			// TODO(maybe): Only delete mod handles for
-			// which the withoutURI is relevant.
-			// Requires reverse-engineering the go command. (!)
-			result.modTidyHandles.Clear()
+			// Only invalidate mod tidy results for the most relevant modfile in the
+			// workspace. This is a potentially lossy optimization for workspaces
+			// with many modules (such as google-cloud-go, which has 145 modules as
+			// of writing).
+			//
+			// While it is theoretically possible that a change in workspace module A
+			// could affect the mod-tidiness of workspace module B (if B transitively
+			// requires A), such changes are probably unlikely and not worth the
+			// penalty of re-running go mod tidy for everything. Note that mod tidy
+			// ignores GOWORK, so the two modules would have to be related by a chain
+			// of replace directives.
+			//
+			// We could improve accuracy by inspecting replace directives, using
+			// overlays in go mod tidy, and/or checking for metadata changes from the
+			// on-disk content.
+			//
+			// Note that we iterate the modTidyHandles map here, rather than e.g.
+			// using nearestModFile, because we don't have access to an accurate
+			// FileSource at this point in the snapshot clone.
+			const onlyInvalidateMostRelevant = true
+			if onlyInvalidateMostRelevant {
+				deleteMostRelevantModFile(result.modTidyHandles, uri)
+			} else {
+				result.modTidyHandles.Clear()
+			}
+
+			// TODO(rfindley): should we apply the above heuristic to mod vuln
+			// or mod handles as well?
+			//
+			// TODO(rfindley): no tests fail if I delete the below line.
 			result.modWhyHandles.Clear()
 			result.modVulnHandles.Clear()
 		}
@@ -2277,6 +2303,31 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
 	return result, release
 }
 
+// deleteMostRelevantModFile deletes the mod file most likely to be the mod
+// file for the changed URI, if it exists.
+//
+// Specifically, this is the longest mod file path in a directory containing
+// changed. This might not be accurate if there is another mod file closer to
+// changed that happens not to be present in the map, but that's OK: the goal
+// of this function is to guarantee that IF the nearest mod file is present in
+// the map, it is invalidated.
+func deleteMostRelevantModFile(m *persistent.Map, changed span.URI) { + var mostRelevant span.URI + changedFile := changed.Filename() + + m.Range(func(key, value interface{}) { + modURI := key.(span.URI) + if len(modURI) > len(mostRelevant) { + if source.InDir(filepath.Dir(modURI.Filename()), changedFile) { + mostRelevant = modURI + } + } + }) + if mostRelevant != "" { + m.Delete(mostRelevant) + } +} + // invalidatedPackageIDs returns all packages invalidated by a change to uri. // If we haven't seen this URI before, we guess based on files in the same // directory. This is of course incorrect in build systems where packages are From 2ec4299f382713b37296f2a7eb57f2847d5f835a Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 18 May 2023 13:45:43 -0400 Subject: [PATCH 051/109] gopls/internal/lsp: split file-watching glob patterns This change causes the N file-watching glob patterns, one per directory, to be sent not as one large comma- separated string, but as a list of N individual directories. This appears to reliably work around a bug in VSCode whereby saving a buffer twice without delay, in a workspace with 8,000 watched directories, would cause it to get stuck for several minutes processing file-watching glob patterns. It is possible that this change is pathological for other LSP clients; we should test. It may still be better to replace the list of patterns by a single "all files recursively" pattern, as we have discussed doing. Emacs was much improved; nvim regressed. We may need to selective based on the client type; we'll do that as a follow-up. Many thanks to Cody Oss for helping us identify this bug in Code - OSS (https://github.com/microsoft/vscode). Also, include the gopls pid in the server debug info. It was useful when debugging. Fixes golang/go#60089 Change-Id: I9f58cf8c81daacbbbffbf810e9bd150891f171d6 Reviewed-on: https://go-review.googlesource.com/c/tools/+/496186 Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan Reviewed-by: Robert Findley --- gopls/internal/lsp/cache/session.go | 30 ++++++------- gopls/internal/lsp/cache/snapshot.go | 67 +++++++++++++++------------- gopls/internal/lsp/debug/info.go | 2 + gopls/internal/lsp/general.go | 11 ++--- gopls/internal/lsp/server.go | 3 +- 5 files changed, 58 insertions(+), 55 deletions(-) diff --git a/gopls/internal/lsp/cache/session.go b/gopls/internal/lsp/cache/session.go index eaad67c8e06..e13f4c8a474 100644 --- a/gopls/internal/lsp/cache/session.go +++ b/gopls/internal/lsp/cache/session.go @@ -596,20 +596,17 @@ func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes for _, c := range changes { if !knownDirs.Contains(c.URI) { result = append(result, c) - continue - } - affectedFiles := knownFilesInDir(ctx, snapshots, c.URI) - var fileChanges []source.FileModification - for uri := range affectedFiles { - fileChanges = append(fileChanges, source.FileModification{ - URI: uri, - Action: c.Action, - LanguageID: "", - OnDisk: c.OnDisk, - // changes to directories cannot include text or versions - }) + } else { + for uri := range knownFilesInDir(ctx, snapshots, c.URI) { + result = append(result, source.FileModification{ + URI: uri, + Action: c.Action, + LanguageID: "", + OnDisk: c.OnDisk, + // changes to directories cannot include text or versions + }) + } } - result = append(result, fileChanges...) 
} return result } @@ -738,9 +735,10 @@ func (fs *overlayFS) updateOverlays(ctx context.Context, changes []source.FileMo return nil } -// FileWatchingGlobPatterns returns glob patterns to watch every directory -// known by the view. For views within a module, this is the module root, -// any directory in the module root, and any replace targets. +// FileWatchingGlobPatterns returns a new set of glob patterns to +// watch every directory known by the view. For views within a module, +// this is the module root, any directory in the module root, and any +// replace targets. func (s *Session) FileWatchingGlobPatterns(ctx context.Context) map[string]struct{} { s.viewMu.Lock() defer s.viewMu.Unlock() diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index e0a81130c53..948912a549a 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -159,10 +159,10 @@ type snapshot struct { modWhyHandles *persistent.Map // from span.URI to *memoize.Promise[modWhyResult] modVulnHandles *persistent.Map // from span.URI to *memoize.Promise[modVulnResult] - // knownSubdirs is the set of subdirectories in the workspace, used to - // create glob patterns for file watching. - knownSubdirs knownDirsSet - knownSubdirsPatternCache string + // knownSubdirs is the set of subdirectory URIs in the workspace, + // used to create glob patterns for file watching. + knownSubdirs knownDirsSet + knownSubdirsCache map[string]struct{} // memo of knownSubdirs as a set of filenames // unprocessedSubdirChanges are any changes that might affect the set of // subdirectories in the workspace. They are not reflected to knownSubdirs // during the snapshot cloning step as it can slow down cloning. @@ -936,19 +936,31 @@ func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]stru patterns[fmt.Sprintf("%s/**/*.{%s}", dirName, extensions)] = struct{}{} } - // Some clients do not send notifications for changes to directories that - // contain Go code (golang/go#42348). To handle this, explicitly watch all - // of the directories in the workspace. We find them by adding the - // directories of every file in the snapshot's workspace directories. - // There may be thousands. - if pattern := s.getKnownSubdirsPattern(dirs); pattern != "" { - patterns[pattern] = struct{}{} - } + // Some clients (e.g. VSCode) do not send notifications for + // changes to directories that contain Go code (golang/go#42348). + // To handle this, explicitly watch all of the directories in + // the workspace. We find them by adding the directories of + // every file in the snapshot's workspace directories. + // There may be thousands of patterns, each a single directory. + // + // (A previous iteration created a single glob pattern holding a + // union of all the directories, but this was found to cause + // VSCode to get stuck for several minutes after a buffer was + // saved twice in a workspace that had >8000 watched directories.) + // + // Some clients (notably coc.nvim, which uses watchman for + // globs) perform poorly with a large list of individual + // directories, though they work fine with one large + // comma-separated element. Sadly no one size fits all, so we + // may have to resort to sniffing the client to determine the + // best behavior, though that would set a poor precedent. + // TODO(adonovan): improve the nvim situation. 
+ s.addKnownSubdirs(patterns, dirs) return patterns } -func (s *snapshot) getKnownSubdirsPattern(wsDirs []span.URI) string { +func (s *snapshot) addKnownSubdirs(patterns map[string]struct{}, wsDirs []span.URI) { s.mu.Lock() defer s.mu.Unlock() @@ -957,23 +969,18 @@ func (s *snapshot) getKnownSubdirsPattern(wsDirs []span.URI) string { // It may change list of known subdirs and therefore invalidate the cache. s.applyKnownSubdirsChangesLocked(wsDirs) - if s.knownSubdirsPatternCache == "" { - var builder strings.Builder + // TODO(adonovan): is it still necessary to memoize the Range + // and URI.Filename operations? + if s.knownSubdirsCache == nil { + s.knownSubdirsCache = make(map[string]struct{}) s.knownSubdirs.Range(func(uri span.URI) { - if builder.Len() == 0 { - builder.WriteString("{") - } else { - builder.WriteString(",") - } - builder.WriteString(uri.Filename()) + s.knownSubdirsCache[uri.Filename()] = struct{}{} }) - if builder.Len() > 0 { - builder.WriteString("}") - s.knownSubdirsPatternCache = builder.String() - } } - return s.knownSubdirsPatternCache + for pattern := range s.knownSubdirsCache { + patterns[pattern] = struct{}{} + } } // collectAllKnownSubdirs collects all of the subdirectories within the @@ -987,7 +994,7 @@ func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) { s.knownSubdirs.Destroy() s.knownSubdirs = newKnownDirsSet() - s.knownSubdirsPatternCache = "" + s.knownSubdirsCache = nil s.files.Range(func(uri span.URI, fh source.FileHandle) { s.addKnownSubdirLocked(uri, dirs) }) @@ -1046,7 +1053,7 @@ func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) { } s.knownSubdirs.Insert(uri) dir = filepath.Dir(dir) - s.knownSubdirsPatternCache = "" + s.knownSubdirsCache = nil } } @@ -1059,7 +1066,7 @@ func (s *snapshot) removeKnownSubdirLocked(uri span.URI) { } if info, _ := os.Stat(dir); info == nil { s.knownSubdirs.Remove(uri) - s.knownSubdirsPatternCache = "" + s.knownSubdirsCache = nil } dir = filepath.Dir(dir) } @@ -2024,7 +2031,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC // changed files. We need to rebuild the workspace module to know the // true set of known subdirectories, but we don't want to do that in clone. 
result.knownSubdirs = s.knownSubdirs.Clone() - result.knownSubdirsPatternCache = s.knownSubdirsPatternCache + result.knownSubdirsCache = s.knownSubdirsCache for _, c := range changes { result.unprocessedSubdirChanges = append(result.unprocessedSubdirChanges, c) } diff --git a/gopls/internal/lsp/debug/info.go b/gopls/internal/lsp/debug/info.go index 00752e6f9a3..5ce23fc2f59 100644 --- a/gopls/internal/lsp/debug/info.go +++ b/gopls/internal/lsp/debug/info.go @@ -10,6 +10,7 @@ import ( "encoding/json" "fmt" "io" + "os" "reflect" "runtime" "runtime/debug" @@ -67,6 +68,7 @@ func (i *Instance) PrintServerInfo(ctx context.Context, w io.Writer) { section(w, HTML, "Server Instance", func() { fmt.Fprintf(w, "Start time: %v\n", i.StartTime) fmt.Fprintf(w, "LogFile: %s\n", i.Logfile) + fmt.Fprintf(w, "pid: %d\n", os.Getpid()) fmt.Fprintf(w, "Working directory: %s\n", i.Workdir) fmt.Fprintf(w, "Address: %s\n", i.ServerAddress) fmt.Fprintf(w, "Debug address: %s\n", i.DebugAddress()) diff --git a/gopls/internal/lsp/general.go b/gopls/internal/lsp/general.go index 9d12f97966b..4baf98c7c23 100644 --- a/gopls/internal/lsp/general.go +++ b/gopls/internal/lsp/general.go @@ -445,14 +445,13 @@ func equalURISet(m1, m2 map[string]struct{}) bool { // registerWatchedDirectoriesLocked sends the workspace/didChangeWatchedFiles // registrations to the client and updates s.watchedDirectories. +// The caller must not subsequently mutate patterns. func (s *Server) registerWatchedDirectoriesLocked(ctx context.Context, patterns map[string]struct{}) error { if !s.session.Options().DynamicWatchedFilesSupported { return nil } - for k := range s.watchedGlobPatterns { - delete(s.watchedGlobPatterns, k) - } - watchers := []protocol.FileSystemWatcher{} // must be a slice + s.watchedGlobPatterns = patterns + watchers := make([]protocol.FileSystemWatcher, 0, len(patterns)) // must be a slice val := protocol.WatchChange | protocol.WatchDelete | protocol.WatchCreate for pattern := range patterns { watchers = append(watchers, protocol.FileSystemWatcher{ @@ -473,10 +472,6 @@ func (s *Server) registerWatchedDirectoriesLocked(ctx context.Context, patterns return err } s.watchRegistrationCount++ - - for k, v := range patterns { - s.watchedGlobPatterns[k] = v - } return nil } diff --git a/gopls/internal/lsp/server.go b/gopls/internal/lsp/server.go index 9f82e90e63e..db695650967 100644 --- a/gopls/internal/lsp/server.go +++ b/gopls/internal/lsp/server.go @@ -29,7 +29,7 @@ func NewServer(session *cache.Session, client protocol.ClientCloser) *Server { return &Server{ diagnostics: map[span.URI]*fileReports{}, gcOptimizationDetails: make(map[source.PackageID]struct{}), - watchedGlobPatterns: make(map[string]struct{}), + watchedGlobPatterns: nil, // empty changedFiles: make(map[span.URI]struct{}), session: session, client: client, @@ -85,6 +85,7 @@ type Server struct { // watchedGlobPatterns is the set of glob patterns that we have requested // the client watch on disk. It will be updated as the set of directories // that the server should watch changes. + // The map field may be reassigned but the map is immutable. watchedGlobPatternsMu sync.Mutex watchedGlobPatterns map[string]struct{} watchRegistrationCount int From 738ea2bdc9a1bd7bbdf6f9a71489e7cad30c75d3 Mon Sep 17 00:00:00 2001 From: Tim King Date: Thu, 18 May 2023 09:58:38 -0700 Subject: [PATCH 052/109] go/ssa: use core type for field accesses Change Field and FieldAddr to consistently use the core type instead of the underlying type to determine if the address is a pointer or not. 
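To illustrate the distinction informally (a toy example with made-up names box and get, not the ssa builder itself): the underlying type of a type parameter is its constraint interface, while its core type can be a single concrete pointer-to-struct type, and it is the core type that makes the indirection and field selection below well defined.

    package main

    import "fmt"

    type box struct{ f int }

    // The underlying type of P is its constraint interface, but its core type
    // is *box; the indirection (*x) and the field selection .f are resolved
    // through that core type.
    func get[P *box](x P) int {
        return (*x).f
    }

    func main() {
        fmt.Println(get(&box{f: 7})) // 7
    }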
Change-Id: I9a5e31497c1ff4ca733848d7f2a51e5e83ace7e8 Reviewed-on: https://go-review.googlesource.com/c/tools/+/496215 TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan gopls-CI: kokoro Run-TryBot: Tim King --- go/ssa/builder_generic_test.go | 2 +- go/ssa/emit.go | 6 +++--- go/ssa/ssa.go | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go/ssa/builder_generic_test.go b/go/ssa/builder_generic_test.go index 77de3268bc1..c86da0cc8e8 100644 --- a/go/ssa/builder_generic_test.go +++ b/go/ssa/builder_generic_test.go @@ -685,7 +685,7 @@ func TestInstructionString(t *testing.T) { //@ instrs("f12", "*ssa.MakeMap", "make map[P]bool 1:int") func f12[T any, P *struct{f T}](x T) map[P]bool { return map[P]bool{{}: true} } - //@ instrs("f13", "&v[0:int]") + //@ instrs("f13", "*ssa.IndexAddr", "&v[0:int]") //@ instrs("f13", "*ssa.Store", "*t0 = 7:int", "*v = *new(A):A") func f13[A [3]int, PA *A](v PA) { *v = A{7} diff --git a/go/ssa/emit.go b/go/ssa/emit.go index 80e30b6c215..fe2f6f0f6d6 100644 --- a/go/ssa/emit.go +++ b/go/ssa/emit.go @@ -476,7 +476,7 @@ func emitTailCall(f *Function, call *Call) { // value of a field. func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) Value { for _, index := range indices { - if st, vptr := deptr(v.Type()); vptr { + if st, vptr := deref(v.Type()); vptr { fld := fieldOf(st, index) instr := &FieldAddr{ X: v, @@ -486,7 +486,7 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) instr.setType(types.NewPointer(fld.Type())) v = f.emit(instr) // Load the field's value iff indirectly embedded. - if _, fldptr := deptr(fld.Type()); fldptr { + if _, fldptr := deref(fld.Type()); fldptr { v = emitLoad(f, v) } } else { @@ -510,7 +510,7 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) // field's value. // Ident id is used for position and debug info. func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { - if st, vptr := deptr(v.Type()); vptr { + if st, vptr := deref(v.Type()); vptr { fld := fieldOf(st, index) instr := &FieldAddr{ X: v, diff --git a/go/ssa/ssa.go b/go/ssa/ssa.go index eeb968130f8..313146d3351 100644 --- a/go/ssa/ssa.go +++ b/go/ssa/ssa.go @@ -865,7 +865,7 @@ type Slice struct { type FieldAddr struct { register X Value // *struct - Field int // field is typeparams.CoreType(X.Type().Underlying().(*types.Pointer).Elem()).(*types.Struct).Field(Field) + Field int // index into CoreType(CoreType(X.Type()).(*types.Pointer).Elem()).(*types.Struct).Fields } // The Field instruction yields the Field of struct X. @@ -884,7 +884,7 @@ type FieldAddr struct { type Field struct { register X Value // struct - Field int // index into typeparams.CoreType(X.Type()).(*types.Struct).Fields + Field int // index into CoreType(X.Type()).(*types.Struct).Fields } // The IndexAddr instruction yields the address of the element at From d4e66bd9ab95380639d1a3a631c3b6ef81403723 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 19 May 2023 16:03:25 -0400 Subject: [PATCH 053/109] go/ssa: TestStdlib: disable check that function names are distinct Even though the test doesn't request test packages, and thus doesn't encounter test variants, go list may now report PGO variants of non-test packages that would violate this assertion. 
Fixes golang/go#60263 Change-Id: I247e8265e380976aa26f8e4cba13445fda62c703 Reviewed-on: https://go-review.googlesource.com/c/tools/+/496575 gopls-CI: kokoro Run-TryBot: Alan Donovan Reviewed-by: Bryan Mills TryBot-Result: Gopher Robot Reviewed-by: Tim King Reviewed-by: Michael Pratt --- go/ssa/stdlib_test.go | 43 ++++++++++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/go/ssa/stdlib_test.go b/go/ssa/stdlib_test.go index 8b9f4238da8..11782f778fe 100644 --- a/go/ssa/stdlib_test.go +++ b/go/ssa/stdlib_test.go @@ -36,7 +36,7 @@ func bytesAllocated() uint64 { func TestStdlib(t *testing.T) { if testing.Short() { - t.Skip("skipping in short mode; too slow (https://golang.org/issue/14113)") + t.Skip("skipping in short mode; too slow (https://golang.org/issue/14113)") // ~5s } testenv.NeedsTool(t, "go") @@ -81,20 +81,33 @@ func TestStdlib(t *testing.T) { allFuncs := ssautil.AllFunctions(prog) - // Check that all non-synthetic functions have distinct names. - // Synthetic wrappers for exported methods should be distinct too, - // except for unexported ones (explained at (*Function).RelString). - byName := make(map[string]*ssa.Function) - for fn := range allFuncs { - if fn.Synthetic == "" || ast.IsExported(fn.Name()) { - str := fn.String() - prev := byName[str] - byName[str] = fn - if prev != nil { - t.Errorf("%s: duplicate function named %s", - prog.Fset.Position(fn.Pos()), str) - t.Errorf("%s: (previously defined here)", - prog.Fset.Position(prev.Pos())) + // The assertion below is not valid if the program contains + // variants of the same package, such as the test variants + // (e.g. package p as compiled for test executable x) obtained + // when cfg.Tests=true. Profile-guided optimization may + // lead to similar variation for non-test executables. + // + // Ideally, the test would assert that all functions within + // each executable (more generally: within any singly rooted + // transitively closed subgraph of the import graph) have + // distinct names, but that isn't so easy to compute efficiently. + // Disabling for now. + if false { + // Check that all non-synthetic functions have distinct names. + // Synthetic wrappers for exported methods should be distinct too, + // except for unexported ones (explained at (*Function).RelString). + byName := make(map[string]*ssa.Function) + for fn := range allFuncs { + if fn.Synthetic == "" || ast.IsExported(fn.Name()) { + str := fn.String() + prev := byName[str] + byName[str] = fn + if prev != nil { + t.Errorf("%s: duplicate function named %s", + prog.Fset.Position(fn.Pos()), str) + t.Errorf("%s: (previously defined here)", + prog.Fset.Position(prev.Pos())) + } } } } From 07293620cd9cc9bc10c20dfc853c406da3dcab72 Mon Sep 17 00:00:00 2001 From: Sean Liao Date: Mon, 13 Mar 2023 23:42:54 +0800 Subject: [PATCH 054/109] present: reformat doc comment for lack of inline code Fixes golang/go#58999 Change-Id: I65886cd5b80b022b038462b8040453feafaaf7b2 Reviewed-on: https://go-review.googlesource.com/c/tools/+/475975 Reviewed-by: Ian Lance Taylor Auto-Submit: Ian Lance Taylor Run-TryBot: Ian Lance Taylor TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Russ Cox --- present/doc.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/present/doc.go b/present/doc.go index 71f758fb390..2c88fb990b0 100644 --- a/present/doc.go +++ b/present/doc.go @@ -200,8 +200,11 @@ There must be no spaces between markers. 
Within marked text, a single marker character becomes a space and a doubled single marker quotes the marker character. -Links can be included in any text with the form [[url][label]], or -[[url]] to use the URL itself as the label. +Links can be included in any text with either explicit labels +or the URL itself as the label. For example: + + [[url][label]] + [[url]] # Command Invocations From 3c025517650c76973a2bfc9e152fdde66aae77b7 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Sun, 21 May 2023 19:55:48 -0400 Subject: [PATCH 055/109] internal/typesinternal: remove NewObjectpathFunc Updates golang/go#58668 Fixes golang/go#60330 Change-Id: I06bf739e9278028cbafb174b93699fbdfe98882f Reviewed-on: https://go-review.googlesource.com/c/tools/+/496875 Run-TryBot: Alan Donovan gopls-CI: kokoro Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Robert Findley --- internal/typesinternal/types.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/internal/typesinternal/types.go b/internal/typesinternal/types.go index 3c53fbc63b9..ce7d4351b22 100644 --- a/internal/typesinternal/types.go +++ b/internal/typesinternal/types.go @@ -11,8 +11,6 @@ import ( "go/types" "reflect" "unsafe" - - "golang.org/x/tools/go/types/objectpath" ) func SetUsesCgo(conf *types.Config) bool { @@ -52,10 +50,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } var SetGoVersion = func(conf *types.Config, version string) bool { return false } - -// NewObjectpathEncoder returns a function closure equivalent to -// objectpath.For but amortized for multiple (sequential) calls. -// It is a temporary workaround, pending the approval of proposal 58668. -// -//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor -func NewObjectpathFunc() func(types.Object) (objectpath.Path, error) From 3a5dbf351046aa8e826b87b452f0f28e082fc8cc Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Sun, 21 May 2023 11:03:28 -0400 Subject: [PATCH 056/109] gopls: add a new "subdirWatchPatterns" setting As discovered in golang/go#60089, file watching patterns behave very differently in different clients. We avoided a bad client-side bug in VS Code by splitting our subdirectory watch pattern, but this appears to be very expensive in other clients (notably coc.nvim, or any vim client that uses watchman). The subdirectory watch patterns were only known to be necessary for VS Code, due to microsoft/vscode#109754. Other clients work as expected when we watch e.g. **/*.go. For that reason, let's revert all other clients to just use simple watch patterns, and only specialize to have subdirectory watch patterns for VS Code. It's truly unfortunate to have to specialize in this way. To paper over this hole in the wall, add an internal setting that allows clients to configure this behavior explicitly. The new "subdirWatchPatterns" setting may accepts the following values: - "on": request watch patterns for each subdirectory (as before) - "off": do not request subdirectory watch patterns - "auto": same as "on" for VS Code, "off" for all others, based on the provided initializeParams.clientInfo.Name. Includes some minor cleanup for the fake editor, and fixes some stale comments. 
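A rough sketch of the resolution rule described above (a hypothetical standalone helper, not the gopls code; the real logic lives on the snapshot and consults the session options):

    package main

    import "fmt"

    // watchSubdirs reports whether per-subdirectory watch patterns should be
    // registered, given the "subdirWatchPatterns" value and the client name
    // from initializeParams.clientInfo.name.
    func watchSubdirs(setting, clientName string) bool {
        switch setting {
        case "on":
            return true
        case "off":
            return false
        default: // "auto"
            return clientName == "Visual Studio Code"
        }
    }

    func main() {
        fmt.Println(watchSubdirs("auto", "Visual Studio Code")) // true
        fmt.Println(watchSubdirs("auto", "coc.nvim"))           // false
        fmt.Println(watchSubdirs("off", "Visual Studio Code"))  // false
    }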
Updates golang/go#60089
Fixes golang/go#59635

Change-Id: I1eab5c08790bd86a5910657169edcb20511c0280
Reviewed-on: https://go-review.googlesource.com/c/tools/+/496835
TryBot-Result: Gopher Robot
Reviewed-by: Alan Donovan
gopls-CI: kokoro
Run-TryBot: Robert Findley
---
 gopls/internal/lsp/cache/snapshot.go          | 66 +++++++++----
 gopls/internal/lsp/fake/client.go             |  5 +-
 gopls/internal/lsp/fake/editor.go             | 95 ++++++++++---------
 gopls/internal/lsp/general.go                 |  2 +-
 gopls/internal/lsp/regtest/expectation.go     |  2 +-
 gopls/internal/lsp/regtest/options.go         | 10 +-
 gopls/internal/lsp/source/options.go          | 51 +++++++++-
 .../regtest/diagnostics/diagnostics_test.go   |  8 +-
 gopls/internal/regtest/watch/setting_test.go  | 85 +++++++++++++++++
 9 files changed, 251 insertions(+), 73 deletions(-)
 create mode 100644 gopls/internal/regtest/watch/setting_test.go

diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go
index 948912a549a..de9524bf0ae 100644
--- a/gopls/internal/lsp/cache/snapshot.go
+++ b/gopls/internal/lsp/cache/snapshot.go
@@ -936,30 +936,56 @@ func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]stru
 		patterns[fmt.Sprintf("%s/**/*.{%s}", dirName, extensions)] = struct{}{}
 	}
 
-	// Some clients (e.g. VSCode) do not send notifications for
-	// changes to directories that contain Go code (golang/go#42348).
-	// To handle this, explicitly watch all of the directories in
-	// the workspace. We find them by adding the directories of
-	// every file in the snapshot's workspace directories.
-	// There may be thousands of patterns, each a single directory.
-	//
-	// (A previous iteration created a single glob pattern holding a
-	// union of all the directories, but this was found to cause
-	// VSCode to get stuck for several minutes after a buffer was
-	// saved twice in a workspace that had >8000 watched directories.)
-	//
-	// Some clients (notably coc.nvim, which uses watchman for
-	// globs) perform poorly with a large list of individual
-	// directories, though they work fine with one large
-	// comma-separated element. Sadly no one size fits all, so we
-	// may have to resort to sniffing the client to determine the
-	// best behavior, though that would set a poor precedent.
-	// TODO(adonovan): improve the nvim situation.
-	s.addKnownSubdirs(patterns, dirs)
+	if s.watchSubdirs() {
+		// Some clients (e.g. VS Code) do not send notifications for changes to
+		// directories that contain Go code (golang/go#42348). To handle this,
+		// explicitly watch all of the directories in the workspace. We find them
+		// by adding the directories of every file in the snapshot's workspace
+		// directories. There may be thousands of patterns, each a single
+		// directory.
+		//
+		// (A previous iteration created a single glob pattern holding a union of
+		// all the directories, but this was found to cause VS Code to get stuck
+		// for several minutes after a buffer was saved twice in a workspace that
+		// had >8000 watched directories.)
+		//
+		// Some clients (notably coc.nvim, which uses watchman for globs) perform
+		// poorly with a large list of individual directories.
+		s.addKnownSubdirs(patterns, dirs)
+	}
 
 	return patterns
 }
 
+// watchSubdirs reports whether gopls should request separate file watchers for
+// each relevant subdirectory. This is necessary only for clients (namely VS
+// Code) that do not send notifications for individual files in a directory
+// when the entire directory is deleted.
+func (s *snapshot) watchSubdirs() bool { + opts := s.view.Options() + switch p := opts.SubdirWatchPatterns; p { + case source.SubdirWatchPatternsOn: + return true + case source.SubdirWatchPatternsOff: + return false + case source.SubdirWatchPatternsAuto: + // See the documentation of InternalOptions.SubdirWatchPatterns for an + // explanation of why VS Code gets a different default value here. + // + // Unfortunately, there is no authoritative list of client names, nor any + // requirements that client names do not change. We should update the VS + // Code extension to set a default value of "subdirWatchPatterns" to "on", + // so that this workaround is only temporary. + if opts.ClientInfo != nil && opts.ClientInfo.Name == "Visual Studio Code" { + return true + } + return false + default: + bug.Reportf("invalid subdirWatchPatterns: %q", p) + return false + } +} + func (s *snapshot) addKnownSubdirs(patterns map[string]struct{}, wsDirs []span.URI) { s.mu.Lock() defer s.mu.Unlock() diff --git a/gopls/internal/lsp/fake/client.go b/gopls/internal/lsp/fake/client.go index b619ef51e3f..555428e9b1c 100644 --- a/gopls/internal/lsp/fake/client.go +++ b/gopls/internal/lsp/fake/client.go @@ -94,9 +94,8 @@ func (c *Client) Configuration(_ context.Context, p *protocol.ParamConfiguration results := make([]interface{}, len(p.Items)) for i, item := range p.Items { if item.Section == "gopls" { - c.editor.mu.Lock() - results[i] = c.editor.settingsLocked() - c.editor.mu.Unlock() + config := c.editor.Config() + results[i] = makeSettings(c.editor.sandbox, config) } } return results, nil diff --git a/gopls/internal/lsp/fake/editor.go b/gopls/internal/lsp/fake/editor.go index ae9338ddf18..45def8f0b7c 100644 --- a/gopls/internal/lsp/fake/editor.go +++ b/gopls/internal/lsp/fake/editor.go @@ -37,7 +37,6 @@ type Editor struct { serverConn jsonrpc2.Conn client *Client sandbox *Sandbox - defaultEnv map[string]string // TODO(adonovan): buffers should be keyed by protocol.DocumentURI. mu sync.Mutex @@ -75,8 +74,14 @@ func (b buffer) text() string { // source.UserOptions, but we use a separate type here so that we expose only // that configuration which we support. // -// The zero value for EditorConfig should correspond to its defaults. +// The zero value for EditorConfig is the default configuration. type EditorConfig struct { + // ClientName sets the clientInfo.name for the LSP session (in the initialize request). + // + // Since this can only be set during initialization, changing this field via + // Editor.ChangeConfiguration has no effect. + ClientName string + // Env holds environment variables to apply on top of the default editor // environment. When applying these variables, the special string // $SANDBOX_WORKDIR is replaced by the absolute path to the sandbox working @@ -109,10 +114,9 @@ type EditorConfig struct { // NewEditor creates a new Editor. func NewEditor(sandbox *Sandbox, config EditorConfig) *Editor { return &Editor{ - buffers: make(map[string]buffer), - sandbox: sandbox, - defaultEnv: sandbox.GoEnv(), - config: config, + buffers: make(map[string]buffer), + sandbox: sandbox, + config: config, } } @@ -198,19 +202,17 @@ func (e *Editor) Client() *Client { return e.client } -// settingsLocked builds the settings map for use in LSP settings RPCs. -// -// e.mu must be held while calling this function. -func (e *Editor) settingsLocked() map[string]interface{} { +// makeSettings builds the settings map for use in LSP settings RPCs. 
+func makeSettings(sandbox *Sandbox, config EditorConfig) map[string]interface{} { env := make(map[string]string) - for k, v := range e.defaultEnv { + for k, v := range sandbox.GoEnv() { env[k] = v } - for k, v := range e.config.Env { + for k, v := range config.Env { env[k] = v } for k, v := range env { - v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename()) + v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", sandbox.Workdir.RootURI().SpanURI().Filename()) env[k] = v } @@ -226,7 +228,7 @@ func (e *Editor) settingsLocked() map[string]interface{} { "completionBudget": "10s", } - for k, v := range e.config.Settings { + for k, v := range config.Settings { if k == "env" { panic("must not provide env via the EditorConfig.Settings field: use the EditorConfig.Env field instead") } @@ -237,20 +239,22 @@ func (e *Editor) settingsLocked() map[string]interface{} { } func (e *Editor) initialize(ctx context.Context) error { + config := e.Config() + params := &protocol.ParamInitialize{} - params.ClientInfo = &protocol.Msg_XInitializeParams_clientInfo{} - params.ClientInfo.Name = "fakeclient" - params.ClientInfo.Version = "v1.0.0" - e.mu.Lock() - params.WorkspaceFolders = e.makeWorkspaceFoldersLocked() - params.InitializationOptions = e.settingsLocked() - e.mu.Unlock() - params.Capabilities.Workspace.Configuration = true - params.Capabilities.Window.WorkDoneProgress = true + if e.config.ClientName != "" { + params.ClientInfo = &protocol.Msg_XInitializeParams_clientInfo{} + params.ClientInfo.Name = e.config.ClientName + params.ClientInfo.Version = "v1.0.0" + } + params.InitializationOptions = makeSettings(e.sandbox, config) + params.WorkspaceFolders = makeWorkspaceFolders(e.sandbox, config.WorkspaceFolders) + params.Capabilities.Workspace.Configuration = true // support workspace/configuration + params.Capabilities.Window.WorkDoneProgress = true // support window/workDoneProgress - // TODO: set client capabilities - params.Capabilities.TextDocument.Completion.CompletionItem.TagSupport.ValueSet = []protocol.CompletionItemTag{protocol.ComplDeprecated} + // TODO(rfindley): set client capabilities (note from the future: why?) + params.Capabilities.TextDocument.Completion.CompletionItem.TagSupport.ValueSet = []protocol.CompletionItemTag{protocol.ComplDeprecated} params.Capabilities.TextDocument.Completion.CompletionItem.SnippetSupport = true params.Capabilities.TextDocument.SemanticTokens.Requests.Full.Value = true // copied from lsp/semantic.go to avoid import cycle in tests @@ -269,11 +273,12 @@ func (e *Editor) initialize(ctx context.Context) error { // but really we should test both ways for older editors. params.Capabilities.TextDocument.DocumentSymbol.HierarchicalDocumentSymbolSupport = true - // This is a bit of a hack, since the fake editor doesn't actually support - // watching changed files that match a specific glob pattern. However, the - // editor does send didChangeWatchedFiles notifications, so set this to - // true. + // Glob pattern watching is enabled. params.Capabilities.Workspace.DidChangeWatchedFiles.DynamicRegistration = true + + // "rename" operations are used for package renaming. + // + // TODO(rfindley): add support for other resource operations (create, delete, ...) 
params.Capabilities.Workspace.WorkspaceEdit = &protocol.WorkspaceEditClientCapabilities{ ResourceOperations: []protocol.ResourceOperationKind{ "rename", @@ -300,18 +305,15 @@ func (e *Editor) initialize(ctx context.Context) error { return nil } -// makeWorkspaceFoldersLocked creates a slice of workspace folders to use for +// makeWorkspaceFolders creates a slice of workspace folders to use for // this editing session, based on the editor configuration. -// -// e.mu must be held while calling this function. -func (e *Editor) makeWorkspaceFoldersLocked() (folders []protocol.WorkspaceFolder) { - paths := e.config.WorkspaceFolders +func makeWorkspaceFolders(sandbox *Sandbox, paths []string) (folders []protocol.WorkspaceFolder) { if len(paths) == 0 { - paths = append(paths, string(e.sandbox.Workdir.RelativeTo)) + paths = []string{string(sandbox.Workdir.RelativeTo)} } for _, path := range paths { - uri := string(e.sandbox.Workdir.URI(path)) + uri := string(sandbox.Workdir.URI(path)) folders = append(folders, protocol.WorkspaceFolder{ URI: uri, Name: filepath.Base(uri), @@ -1329,14 +1331,18 @@ func (e *Editor) Config() EditorConfig { return e.config } +func (e *Editor) SetConfig(cfg EditorConfig) { + e.mu.Lock() + e.config = cfg + e.mu.Unlock() +} + // ChangeConfiguration sets the new editor configuration, and if applicable // sends a didChangeConfiguration notification. // // An error is returned if the change notification failed to send. func (e *Editor) ChangeConfiguration(ctx context.Context, newConfig EditorConfig) error { - e.mu.Lock() - e.config = newConfig - e.mu.Unlock() // don't hold e.mu during server calls + e.SetConfig(newConfig) if e.Server != nil { var params protocol.DidChangeConfigurationParams // empty: gopls ignores the Settings field if err := e.Server.DidChangeConfiguration(ctx, ¶ms); err != nil { @@ -1351,12 +1357,13 @@ func (e *Editor) ChangeConfiguration(ctx context.Context, newConfig EditorConfig // // The given folders must all be unique. func (e *Editor) ChangeWorkspaceFolders(ctx context.Context, folders []string) error { + config := e.Config() + // capture existing folders so that we can compute the change. - e.mu.Lock() - oldFolders := e.makeWorkspaceFoldersLocked() - e.config.WorkspaceFolders = folders - newFolders := e.makeWorkspaceFoldersLocked() - e.mu.Unlock() + oldFolders := makeWorkspaceFolders(e.sandbox, config.WorkspaceFolders) + newFolders := makeWorkspaceFolders(e.sandbox, folders) + config.WorkspaceFolders = folders + e.SetConfig(config) if e.Server == nil { return nil diff --git a/gopls/internal/lsp/general.go b/gopls/internal/lsp/general.go index 4baf98c7c23..04fe7136562 100644 --- a/gopls/internal/lsp/general.go +++ b/gopls/internal/lsp/general.go @@ -59,7 +59,7 @@ func (s *Server) initialize(ctx context.Context, params *protocol.ParamInitializ if err := s.handleOptionResults(ctx, source.SetOptions(options, params.InitializationOptions)); err != nil { return nil, err } - options.ForClientCapabilities(params.Capabilities) + options.ForClientCapabilities(params.ClientInfo, params.Capabilities) if options.ShowBugReports { // Report the next bug that occurs on the server. 
diff --git a/gopls/internal/lsp/regtest/expectation.go b/gopls/internal/lsp/regtest/expectation.go index 9d9f023d92a..7cfee8b0169 100644 --- a/gopls/internal/lsp/regtest/expectation.go +++ b/gopls/internal/lsp/regtest/expectation.go @@ -235,7 +235,7 @@ func ShownMessage(containing string) Expectation { } return Expectation{ Check: check, - Description: "received ShowMessage", + Description: fmt.Sprintf("received window/showMessage containing %q", containing), } } diff --git a/gopls/internal/lsp/regtest/options.go b/gopls/internal/lsp/regtest/options.go index 7a41696fe65..f55fd5b1150 100644 --- a/gopls/internal/lsp/regtest/options.go +++ b/gopls/internal/lsp/regtest/options.go @@ -64,8 +64,14 @@ func WindowsLineEndings() RunOption { }) } -// Settings is a RunOption that sets user-provided configuration for the LSP -// server. +// ClientName sets the LSP client name. +func ClientName(name string) RunOption { + return optionSetter(func(opts *runConfig) { + opts.editor.ClientName = name + }) +} + +// Settings sets user-provided configuration for the LSP server. // // As a special case, the env setting must not be provided via Settings: use // EnvVars instead. diff --git a/gopls/internal/lsp/source/options.go b/gopls/internal/lsp/source/options.go index 2ca889513da..23d6e9a45a2 100644 --- a/gopls/internal/lsp/source/options.go +++ b/gopls/internal/lsp/source/options.go @@ -169,6 +169,7 @@ func DefaultOptions() *Options { DeepCompletion: true, ChattyDiagnostics: true, NewDiff: "both", + SubdirWatchPatterns: SubdirWatchPatternsAuto, }, Hooks: Hooks{ // TODO(adonovan): switch to new diff.Strings implementation. @@ -198,6 +199,7 @@ type Options struct { // ClientOptions holds LSP-specific configuration that is provided by the // client. type ClientOptions struct { + ClientInfo *protocol.Msg_XInitializeParams_clientInfo InsertTextFormat protocol.InsertTextFormat ConfigurationSupported bool DynamicConfigurationSupported bool @@ -536,6 +538,9 @@ type Hooks struct { // average user. These may be settings used by tests or outdated settings that // will soon be deprecated. Some of these settings may not even be configurable // by the user. +// +// TODO(rfindley): even though these settings are not intended for +// modification, we should surface them in our documentation. type InternalOptions struct { // LiteralCompletions controls whether literal candidates such as // "&someStruct{}" are offered. Tests disable this flag to simplify @@ -599,8 +604,42 @@ type InternalOptions struct { // file change. If unset, gopls only reports diagnostics when they change, or // when a file is opened or closed. ChattyDiagnostics bool + + // SubdirWatchPatterns configures the file watching glob patterns registered + // by gopls. + // + // Some clients (namely VS Code) do not send workspace/didChangeWatchedFile + // notifications for files contained in a directory when that directory is + // deleted: + // https://github.com/microsoft/vscode/issues/109754 + // + // In this case, gopls would miss important notifications about deleted + // packages. To work around this, gopls registers a watch pattern for each + // directory containing Go files. + // + // Unfortunately, other clients experience performance problems with this + // many watch patterns, so there is no single behavior that works well for + // all clients. + // + // The "subdirWatchPatterns" setting allows configuring this behavior. Its + // default value of "auto" attempts to guess the correct behavior based on + // the client name. 
We'd love to avoid this specialization, but as described + // above there is no single value that works for all clients. + // + // If any LSP client does not behave well with the default value (for + // example, if like VS Code it drops file notifications), please file an + // issue. + SubdirWatchPatterns SubdirWatchPatterns } +type SubdirWatchPatterns string + +const ( + SubdirWatchPatternsOn SubdirWatchPatterns = "on" + SubdirWatchPatternsOff SubdirWatchPatterns = "off" + SubdirWatchPatternsAuto SubdirWatchPatterns = "auto" +) + type ImportShortcut string const ( @@ -742,7 +781,8 @@ func SetOptions(options *Options, opts interface{}) OptionResults { return results } -func (o *Options) ForClientCapabilities(caps protocol.ClientCapabilities) { +func (o *Options) ForClientCapabilities(clientName *protocol.Msg_XInitializeParams_clientInfo, caps protocol.ClientCapabilities) { + o.ClientInfo = clientName // Check if the client supports snippets in completion items. if caps.Workspace.WorkspaceEdit != nil { o.SupportedResourceOperations = caps.Workspace.WorkspaceEdit.ResourceOperations @@ -1159,6 +1199,15 @@ func (o *Options) set(name string, value interface{}, seen map[string]struct{}) case "chattyDiagnostics": result.setBool(&o.ChattyDiagnostics) + case "subdirWatchPatterns": + if s, ok := result.asOneOf( + string(SubdirWatchPatternsOn), + string(SubdirWatchPatternsOff), + string(SubdirWatchPatternsAuto), + ); ok { + o.SubdirWatchPatterns = SubdirWatchPatterns(s) + } + // Replaced settings. case "experimentalDisabledAnalyses": result.deprecated("analyses") diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go index f8e59a0d0f6..c765cb00da1 100644 --- a/gopls/internal/regtest/diagnostics/diagnostics_test.go +++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go @@ -1506,7 +1506,13 @@ func main() { bob.Hello() } ` - Run(t, mod, func(t *testing.T, env *Env) { + WithOptions( + Settings{ + // Now that we don't watch subdirs by default (except for VS Code), + // we must explicitly ask gopls to requests subdir watch patterns. + "subdirWatchPatterns": "on", + }, + ).Run(t, mod, func(t *testing.T, env *Env) { env.OnceMet( InitialWorkspaceLoad, FileWatchMatching("bob"), diff --git a/gopls/internal/regtest/watch/setting_test.go b/gopls/internal/regtest/watch/setting_test.go new file mode 100644 index 00000000000..9ed7fdeaa83 --- /dev/null +++ b/gopls/internal/regtest/watch/setting_test.go @@ -0,0 +1,85 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regtest + +import ( + "fmt" + "testing" + + . 
"golang.org/x/tools/gopls/internal/lsp/regtest" +) + +func TestSubdirWatchPatterns(t *testing.T) { + const files = ` +-- go.mod -- +module mod.test + +go 1.18 +-- subdir/subdir.go -- +package subdir +` + + tests := []struct { + clientName string + subdirWatchPatterns string + wantWatched bool + }{ + {"other client", "on", true}, + {"other client", "off", false}, + {"other client", "auto", false}, + {"Visual Studio Code", "auto", true}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s", test.clientName, test.subdirWatchPatterns), func(t *testing.T) { + WithOptions( + ClientName(test.clientName), + Settings{ + "subdirWatchPatterns": test.subdirWatchPatterns, + }, + ).Run(t, files, func(t *testing.T, env *Env) { + var expectation Expectation + if test.wantWatched { + expectation = FileWatchMatching("subdir") + } else { + expectation = NoFileWatchMatching("subdir") + } + env.OnceMet( + InitialWorkspaceLoad, + expectation, + ) + }) + }) + } +} + +// This test checks that we surface errors for invalid subdir watch patterns, +// as the triple of ("off"|"on"|"auto") may be confusing to users inclined to +// use (true|false) or some other truthy value. +func TestSubdirWatchPatterns_BadValues(t *testing.T) { + tests := []struct { + badValue interface{} + wantMessage string + }{ + {true, "invalid type bool, expect string"}, + {false, "invalid type bool, expect string"}, + {"yes", `invalid option "yes"`}, + } + + for _, test := range tests { + t.Run(fmt.Sprint(test.badValue), func(t *testing.T) { + WithOptions( + Settings{ + "subdirWatchPatterns": test.badValue, + }, + ).Run(t, "", func(t *testing.T, env *Env) { + env.OnceMet( + InitialWorkspaceLoad, + ShownMessage(test.wantMessage), + ) + }) + }) + } +} From edbfdbebff070ae6e5f5c801238f652e921fd4e2 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 19 May 2023 17:37:09 -0400 Subject: [PATCH 057/109] gopls/internal/lsp/source/completion: (unimported) add placeholders The logic added in CL 472183 to provide completions of functions from unimported packages, which lack type information, didn't bother with placeholders. This change causes it to generate placeholders ("name type") from the raw syntax. 
Fixes golang/go#60269 Change-Id: I66340e18de90bdee471a0dfbb1e3fd5c77fec75f Reviewed-on: https://go-review.googlesource.com/c/tools/+/496596 gopls-CI: kokoro Run-TryBot: Alan Donovan Reviewed-by: Robert Findley TryBot-Result: Gopher Robot --- .../lsp/source/completion/completion.go | 28 ++++++++--- .../regtest/completion/completion_test.go | 49 ++++++++++++++++++- 2 files changed, 68 insertions(+), 9 deletions(-) diff --git a/gopls/internal/lsp/source/completion/completion.go b/gopls/internal/lsp/source/completion/completion.go index ad5ce16b372..bc2b0c31bd7 100644 --- a/gopls/internal/lsp/source/completion/completion.go +++ b/gopls/internal/lsp/source/completion/completion.go @@ -12,6 +12,7 @@ import ( "go/ast" "go/constant" "go/parser" + "go/printer" "go/scanner" "go/token" "go/types" @@ -1268,19 +1269,30 @@ func (c *completer) selector(ctx context.Context, sel *ast.SelectorExpr) error { var sn snippet.Builder sn.WriteText(id.Name) sn.WriteText("(") + + var cfg printer.Config // slight overkill var nparams int + param := func(name string, typ ast.Expr) { + if nparams > 0 { + sn.WriteText(", ") + } + nparams++ + sn.WritePlaceholder(func(b *snippet.Builder) { + var buf strings.Builder + buf.WriteString(name) + buf.WriteByte(' ') + cfg.Fprint(&buf, token.NewFileSet(), typ) + b.WriteText(buf.String()) + }) + } for _, field := range fn.Type.Params.List { if field.Names != nil { - nparams += len(field.Names) + for _, name := range field.Names { + param(name.Name, field.Type) + } } else { - nparams++ - } - } - for i := 0; i < nparams; i++ { - if i > 0 { - sn.WriteText(", ") + param("_", field.Type) } - sn.WritePlaceholder(nil) } sn.WriteText(")") item.snippet = &sn diff --git a/gopls/internal/regtest/completion/completion_test.go b/gopls/internal/regtest/completion/completion_test.go index 872bdc224af..7f865942206 100644 --- a/gopls/internal/regtest/completion/completion_test.go +++ b/gopls/internal/regtest/completion/completion_test.go @@ -527,13 +527,60 @@ func main() { env.AcceptCompletion(loc, completions.Items[0]) env.Await(env.DoneWithChange()) got := env.BufferText("main.go") - want := "package main\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"math\"\r\n)\r\n\r\nfunc main() {\r\n\tfmt.Println(\"a\")\r\n\tmath.Sqrt(${1:})\r\n}\r\n" + want := "package main\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"math\"\r\n)\r\n\r\nfunc main() {\r\n\tfmt.Println(\"a\")\r\n\tmath.Sqrt(${1:x float64})\r\n}\r\n" if diff := cmp.Diff(want, got); diff != "" { t.Errorf("unimported completion (-want +got):\n%s", diff) } }) } +func TestUnimportedCompletionHasPlaceholders60269(t *testing.T) { + // We can't express this as a marker test because it doesn't support AcceptCompletion. + const src = ` +-- go.mod -- +module example.com +go 1.12 + +-- a/a.go -- +package a + +var _ = b.F + +-- b/b.go -- +package b + +func F0(a, b int, c float64) {} +func F1(int, chan *string) {} +` + WithOptions( + WindowsLineEndings(), + ).Run(t, src, func(t *testing.T, env *Env) { + env.OpenFile("a/a.go") + env.Await(env.DoneWithOpen()) + + // The table lists the expected completions as they appear in Items. 
+ const common = "package a\r\n\r\nimport \"example.com/b\"\r\n\r\nvar _ = " + for i, want := range []string{ + common + "b.F0(${1:a int}, ${2:b int}, ${3:c float64})\r\n", + common + "b.F1(${1:_ int}, ${2:_ chan *string})\r\n", + } { + loc := env.RegexpSearch("a/a.go", "b.F()") + completions := env.Completion(loc) + if len(completions.Items) == 0 { + t.Fatalf("no completion items") + } + saved := env.BufferText("a/a.go") + env.AcceptCompletion(loc, completions.Items[i]) + env.Await(env.DoneWithChange()) + got := env.BufferText("a/a.go") + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("%d: unimported completion (-want +got):\n%s", i, diff) + } + env.SetBufferContent("a/a.go", saved) // restore + } + }) +} + func TestPackageMemberCompletionAfterSyntaxError(t *testing.T) { // This test documents the current broken behavior due to golang/go#58833. const src = ` From 9ca66ba886f38804b2e2b02940c9d89418fa33cb Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Sun, 21 May 2023 17:47:25 -0400 Subject: [PATCH 058/109] gopls/internal/lsp/regtest: delete TestWatchReplaceTargets We don't actually watch replace targets anymore. The way to specify if a module is being used is by including it in a go.work file. Looking back on the flakiness, I am pretty sure it was due simply to type-checking on slow builders, back when we limited each regtest to 20s. This module imports some standard library packages that used to be slow to type check. I am pretty sure this test would no longer be flaky, if we still supported the functionality. While porting over the assertions from this test to TestUseGoWork, I discovered golang/go#60340, a bug in the order of our file watcher evaluation. Fixes golang/go#50748 Change-Id: I26c10ac659d0f195da18b6181b54d7c373cc984b Reviewed-on: https://go-review.googlesource.com/c/tools/+/496879 Run-TryBot: Robert Findley Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot --- gopls/internal/lsp/regtest/expectation.go | 44 ---------------- .../regtest/workspace/workspace_test.go | 51 +++++++++---------- 2 files changed, 23 insertions(+), 72 deletions(-) diff --git a/gopls/internal/lsp/regtest/expectation.go b/gopls/internal/lsp/regtest/expectation.go index 7cfee8b0169..a7706166d5a 100644 --- a/gopls/internal/lsp/regtest/expectation.go +++ b/gopls/internal/lsp/regtest/expectation.go @@ -576,50 +576,6 @@ func jsonProperty(obj interface{}, path ...string) interface{} { return jsonProperty(m[path[0]], path[1:]...) } -// RegistrationMatching asserts that the client has received a capability -// registration matching the given regexp. -// -// TODO(rfindley): remove this once TestWatchReplaceTargets has been revisited. -// -// Deprecated: use (No)FileWatchMatching -func RegistrationMatching(re string) Expectation { - rec := regexp.MustCompile(re) - check := func(s State) Verdict { - for _, p := range s.registrations { - for _, r := range p.Registrations { - if rec.Match([]byte(r.Method)) { - return Met - } - } - } - return Unmet - } - return Expectation{ - Check: check, - Description: fmt.Sprintf("registration matching %q", re), - } -} - -// UnregistrationMatching asserts that the client has received an -// unregistration whose ID matches the given regexp. 
-func UnregistrationMatching(re string) Expectation { - rec := regexp.MustCompile(re) - check := func(s State) Verdict { - for _, p := range s.unregistrations { - for _, r := range p.Unregisterations { - if rec.Match([]byte(r.Method)) { - return Met - } - } - } - return Unmet - } - return Expectation{ - Check: check, - Description: fmt.Sprintf("unregistration matching %q", re), - } -} - // Diagnostics asserts that there is at least one diagnostic matching the given // filters. func Diagnostics(filters ...DiagnosticFilter) Expectation { diff --git a/gopls/internal/regtest/workspace/workspace_test.go b/gopls/internal/regtest/workspace/workspace_test.go index 5a94e42d1f6..88f9f2c2be3 100644 --- a/gopls/internal/regtest/workspace/workspace_test.go +++ b/gopls/internal/regtest/workspace/workspace_test.go @@ -183,29 +183,6 @@ replace random.org => %s }) } -// This test checks that gopls updates the set of files it watches when a -// replace target is added to the go.mod. -func TestWatchReplaceTargets(t *testing.T) { - t.Skipf("skipping known-flaky test: see https://go.dev/issue/50748") - - WithOptions( - ProxyFiles(workspaceProxy), - WorkspaceFolders("pkg"), - ).Run(t, workspaceModule, func(t *testing.T, env *Env) { - // Add a replace directive and expect the files that gopls is watching - // to change. - dir := env.Sandbox.Workdir.URI("goodbye").SpanURI().Filename() - goModWithReplace := fmt.Sprintf(`%s -replace random.org => %s -`, env.ReadWorkspaceFile("pkg/go.mod"), dir) - env.WriteWorkspaceFile("pkg/go.mod", goModWithReplace) - env.AfterChange( - UnregistrationMatching("didChangeWatchedFiles"), - RegistrationMatching("didChangeWatchedFiles"), - ) - }) -} - const workspaceModuleProxy = ` -- example.com@v1.2.3/go.mod -- module example.com @@ -575,10 +552,18 @@ use ( ` WithOptions( ProxyFiles(workspaceModuleProxy), + Settings{ + "subdirWatchPatterns": "on", + }, ).Run(t, multiModule, func(t *testing.T, env *Env) { - // Initially, the go.work should cause only the a.com module to be - // loaded. Validate this by jumping to a definition in b.com and ensuring - // that we go to the module cache. + // Initially, the go.work should cause only the a.com module to be loaded, + // so we shouldn't get any file watches for modb. Further validate this by + // jumping to a definition in b.com and ensuring that we go to the module + // cache. + env.OnceMet( + InitialWorkspaceLoad, + NoFileWatchMatching("modb"), + ) env.OpenFile("moda/a/a.go") env.Await(env.DoneWithOpen()) @@ -610,9 +595,13 @@ use ( `) // As of golang/go#54069, writing go.work to the workspace triggers a - // workspace reload. + // workspace reload, and new file watches. env.AfterChange( Diagnostics(env.AtRegexp("modb/b/b.go", "x")), + // TODO(golang/go#60340): we don't get a file watch yet, because + // updateWatchedDirectories runs before snapshot.load. Instead, we get it + // after the next change (the didOpen below). + // FileWatchMatching("modb"), ) // Jumping to definition should now go to b.com in the workspace. @@ -623,7 +612,13 @@ use ( // Now, let's modify the go.work *overlay* (not on disk), and verify that // this change is only picked up once it is saved. env.OpenFile("go.work") - env.AfterChange() + env.AfterChange( + // TODO(golang/go#60340): delete this expectation in favor of + // the commented-out expectation above, once we fix the evaluation order + // of file watches. We should not have to wait for a second change to get + // the correct watches. 
+ FileWatchMatching("modb"), + ) env.SetBufferContent("go.work", `go 1.17 use ( From e6fd7f4c073132d9f7447caaefe336b8a0582e3a Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 22 May 2023 11:01:53 -0400 Subject: [PATCH 059/109] gopls/internal/lsp/cache: limit module scan to 100K files When no go.work or go.mod file is found, gopls searches to see if there is exactly one module in a nested directory, in which case it narrows the workspace to this one module. This is a legacy workaround for polyglot repositories, and will be made obsolete by golang/go#57979. However, in the meantime this feature is still necessary, and is the last remaining place where we walk the workspace looking for modules. As reported in golang/go#56496, this search can be expensive in very large directories. Reduce the search limit 10x, from 1M->100K, and use the more efficient filepath.WalkDir. Fixes golang/go#56496 Change-Id: Ia46dd90ac2220b09debc68742dd882885c38eb42 Reviewed-on: https://go-review.googlesource.com/c/tools/+/496880 Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Robert Findley --- gopls/internal/lsp/cache/workspace.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gopls/internal/lsp/cache/workspace.go b/gopls/internal/lsp/cache/workspace.go index de36da69b91..28179f5a0b9 100644 --- a/gopls/internal/lsp/cache/workspace.go +++ b/gopls/internal/lsp/cache/workspace.go @@ -8,6 +8,7 @@ import ( "context" "errors" "fmt" + "io/fs" "os" "path/filepath" "sort" @@ -127,7 +128,10 @@ var errExhausted = errors.New("exhausted") // Limit go.mod search to 1 million files. As a point of reference, // Kubernetes has 22K files (as of 2020-11-24). -const fileLimit = 1000000 +// +// Note: per golang/go#56496, the previous limit of 1M files was too slow, at +// which point this limit was decreased to 100K. +const fileLimit = 100_000 // findModules recursively walks the root directory looking for go.mod files, // returning the set of modules it discovers. If modLimit is non-zero, @@ -139,7 +143,7 @@ func findModules(root span.URI, excludePath func(string) bool, modLimit int) (ma modFiles := make(map[span.URI]struct{}) searched := 0 errDone := errors.New("done") - err := filepath.Walk(root.Filename(), func(path string, info os.FileInfo, err error) error { + err := filepath.WalkDir(root.Filename(), func(path string, info fs.DirEntry, err error) error { if err != nil { // Probably a permission error. Keep looking. return filepath.SkipDir From 5ff5cbb00a33ae987651ad350e36f1648b06882b Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 22 May 2023 13:24:35 -0400 Subject: [PATCH 060/109] gopls: deprecate support for Go 1.16 and 1.17, update warnings Update our version table to reflect the existence of gopls@v0.12.0, and deprecate support for Go 1.16 and 1.17. Fixes golang/go#60341 Change-Id: Id061aafacb4099f57d464b5a7453bc1f98fda80a Reviewed-on: https://go-review.googlesource.com/c/tools/+/496881 Run-TryBot: Robert Findley TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan --- gopls/README.md | 1 + gopls/internal/lsp/general.go | 42 +++++++++++++++++++++++------- gopls/internal/lsp/general_test.go | 16 +++++++----- 3 files changed, 44 insertions(+), 15 deletions(-) diff --git a/gopls/README.md b/gopls/README.md index 56d15921a70..396f86c0242 100644 --- a/gopls/README.md +++ b/gopls/README.md @@ -93,6 +93,7 @@ version of gopls. 
| ----------- | --------------------------------------------------- | | Go 1.12 | [gopls@v0.7.5](https://github.com/golang/tools/releases/tag/gopls%2Fv0.7.5) | | Go 1.15 | [gopls@v0.9.5](https://github.com/golang/tools/releases/tag/gopls%2Fv0.9.5) | +| Go 1.17 | [gopls@v0.11.0](https://github.com/golang/tools/releases/tag/gopls%2Fv0.11.0) | Our extended support is enforced via [continuous integration with older Go versions](doc/contributing.md#ci). This legacy Go CI may not block releases: diff --git a/gopls/internal/lsp/general.go b/gopls/internal/lsp/general.go index 04fe7136562..7486f24904a 100644 --- a/gopls/internal/lsp/general.go +++ b/gopls/internal/lsp/general.go @@ -8,6 +8,7 @@ import ( "context" "encoding/json" "fmt" + "go/build" "log" "os" "path" @@ -239,14 +240,16 @@ func (s *Server) initialized(ctx context.Context, params *protocol.InitializedPa // GoVersionTable maps Go versions to the gopls version in which support will // be deprecated, and the final gopls version supporting them without warnings. -// Keep this in sync with gopls/README.md +// Keep this in sync with gopls/README.md. // // Must be sorted in ascending order of Go version. // // Mutable for testing. var GoVersionTable = []GoVersionSupport{ {12, "", "v0.7.5"}, - {15, "v0.11.0", "v0.9.5"}, + {15, "", "v0.9.5"}, + {16, "v0.13.0", "v0.11.0"}, + {17, "v0.13.0", "v0.11.0"}, } // GoVersionSupport holds information about end-of-life Go version support. @@ -262,11 +265,13 @@ func OldestSupportedGoVersion() int { return GoVersionTable[len(GoVersionTable)-1].GoVersion + 1 } -// versionMessage returns the warning/error message to display if the user is -// on the given Go version, if any. The goVersion variable is the X in Go 1.X. +// versionMessage returns the warning/error message to display if the user has +// the given Go version, if any. The goVersion variable is the X in Go 1.X. If +// fromBuild is set, the Go version is the version used to build gopls. +// Otherwise, it is the go command version. // // If goVersion is invalid (< 0), it returns "", 0. -func versionMessage(goVersion int) (string, protocol.MessageType) { +func versionMessage(goVersion int, fromBuild bool) (string, protocol.MessageType) { if goVersion < 0 { return "", 0 } @@ -276,7 +281,11 @@ func versionMessage(goVersion int) (string, protocol.MessageType) { var msgBuilder strings.Builder mType := protocol.Error - fmt.Fprintf(&msgBuilder, "Found Go version 1.%d", goVersion) + if fromBuild { + fmt.Fprintf(&msgBuilder, "Gopls was built with Go version 1.%d", goVersion) + } else { + fmt.Fprintf(&msgBuilder, "Found Go version 1.%d", goVersion) + } if v.DeprecatedVersion != "" { // not deprecated yet, just a warning fmt.Fprintf(&msgBuilder, ", which will be unsupported by gopls %s. ", v.DeprecatedVersion) @@ -299,15 +308,15 @@ func versionMessage(goVersion int) (string, protocol.MessageType) { // // It should be called after views change. 
func (s *Server) checkViewGoVersions() { - oldestVersion := -1 + oldestVersion, fromBuild := go1Point(), true for _, view := range s.session.Views() { viewVersion := view.GoVersion() if oldestVersion == -1 || viewVersion < oldestVersion { - oldestVersion = viewVersion + oldestVersion, fromBuild = viewVersion, false } } - if msg, mType := versionMessage(oldestVersion); msg != "" { + if msg, mType := versionMessage(oldestVersion, fromBuild); msg != "" { s.eventuallyShowMessage(context.Background(), &protocol.ShowMessageParams{ Type: mType, Message: msg, @@ -315,6 +324,21 @@ func (s *Server) checkViewGoVersions() { } } +// go1Point returns the x in Go 1.x. If an error occurs extracting the go +// version, it returns -1. +// +// Copied from the testenv package. +func go1Point() int { + for i := len(build.Default.ReleaseTags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(build.Default.ReleaseTags[i], "go1.%d", &version); err != nil { + continue + } + return version + } + return -1 +} + func (s *Server) addFolders(ctx context.Context, folders []protocol.WorkspaceFolder) error { originalViews := len(s.session.Views()) viewErrors := make(map[span.URI]error) diff --git a/gopls/internal/lsp/general_test.go b/gopls/internal/lsp/general_test.go index a0312ba1b43..6bc0dc1cb2b 100644 --- a/gopls/internal/lsp/general_test.go +++ b/gopls/internal/lsp/general_test.go @@ -14,18 +14,22 @@ import ( func TestVersionMessage(t *testing.T) { tests := []struct { goVersion int + fromBuild bool wantContains []string // string fragments that we expect to see wantType protocol.MessageType }{ - {-1, nil, 0}, - {12, []string{"1.12", "not supported", "upgrade to Go 1.16", "install gopls v0.7.5"}, protocol.Error}, - {13, []string{"1.13", "will be unsupported by gopls v0.11.0", "upgrade to Go 1.16", "install gopls v0.9.5"}, protocol.Warning}, - {15, []string{"1.15", "will be unsupported by gopls v0.11.0", "upgrade to Go 1.16", "install gopls v0.9.5"}, protocol.Warning}, - {16, nil, 0}, + {-1, false, nil, 0}, + {12, false, []string{"1.12", "not supported", "upgrade to Go 1.18", "install gopls v0.7.5"}, protocol.Error}, + {13, false, []string{"1.13", "not supported", "upgrade to Go 1.18", "install gopls v0.9.5"}, protocol.Error}, + {15, false, []string{"1.15", "not supported", "upgrade to Go 1.18", "install gopls v0.9.5"}, protocol.Error}, + {15, true, []string{"Gopls was built with Go version 1.15", "not supported", "upgrade to Go 1.18", "install gopls v0.9.5"}, protocol.Error}, + {16, false, []string{"1.16", "will be unsupported by gopls v0.13.0", "upgrade to Go 1.18", "install gopls v0.11.0"}, protocol.Warning}, + {17, false, []string{"1.17", "will be unsupported by gopls v0.13.0", "upgrade to Go 1.18", "install gopls v0.11.0"}, protocol.Warning}, + {17, true, []string{"Gopls was built with Go version 1.17", "will be unsupported by gopls v0.13.0", "upgrade to Go 1.18", "install gopls v0.11.0"}, protocol.Warning}, } for _, test := range tests { - gotMsg, gotType := versionMessage(test.goVersion) + gotMsg, gotType := versionMessage(test.goVersion, test.fromBuild) if len(test.wantContains) == 0 && gotMsg != "" { t.Errorf("versionMessage(%d) = %q, want \"\"", test.goVersion, gotMsg) From a12ee94f7b2d83ade1907ac77e56395097e522fa Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 22 May 2023 14:00:22 -0400 Subject: [PATCH 061/109] gopls/internal/regtest/misc: update some unilaterally skipped tests Remove skips for two tests related to line directives (now fixed), and delete a test related to the old parse 
cache, which no longer exists. Updates golang/go#53878 Change-Id: I15b1e5d72f5ccc8c094eaa43e73a9bcc1f75c031 Reviewed-on: https://go-review.googlesource.com/c/tools/+/496882 Reviewed-by: Peter Weinberger TryBot-Result: Gopher Robot Run-TryBot: Robert Findley gopls-CI: kokoro --- gopls/internal/regtest/misc/failures_test.go | 2 - gopls/internal/regtest/misc/leak_test.go | 89 -------------------- 2 files changed, 91 deletions(-) delete mode 100644 gopls/internal/regtest/misc/leak_test.go diff --git a/gopls/internal/regtest/misc/failures_test.go b/gopls/internal/regtest/misc/failures_test.go index 42aa3721a34..b5da9b02e15 100644 --- a/gopls/internal/regtest/misc/failures_test.go +++ b/gopls/internal/regtest/misc/failures_test.go @@ -15,7 +15,6 @@ import ( // that includes a line directive, which makes no difference since // gopls ignores line directives. func TestHoverFailure(t *testing.T) { - t.Skip("line directives //line ") const mod = ` -- go.mod -- module mod.com @@ -48,7 +47,6 @@ func main() { // This test demonstrates a case where gopls is not at all confused by // line directives, because it completely ignores them. func TestFailingDiagnosticClearingOnEdit(t *testing.T) { - t.Skip("line directives //line ") // badPackageDup contains a duplicate definition of the 'a' const. // This is a minor variant of TestDiagnosticClearingOnEdit from // diagnostics_test.go, with a line directive, which makes no difference. diff --git a/gopls/internal/regtest/misc/leak_test.go b/gopls/internal/regtest/misc/leak_test.go deleted file mode 100644 index 586ffcc41e9..00000000000 --- a/gopls/internal/regtest/misc/leak_test.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package misc - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "golang.org/x/tools/gopls/internal/hooks" - "golang.org/x/tools/gopls/internal/lsp/cache" - "golang.org/x/tools/gopls/internal/lsp/debug" - "golang.org/x/tools/gopls/internal/lsp/fake" - "golang.org/x/tools/gopls/internal/lsp/lsprpc" - . "golang.org/x/tools/gopls/internal/lsp/regtest" - "golang.org/x/tools/internal/jsonrpc2" - "golang.org/x/tools/internal/jsonrpc2/servertest" -) - -// Test for golang/go#57222. -func TestCacheLeak(t *testing.T) { - // TODO(rfindley): either fix this test with additional instrumentation, or - // delete it. - t.Skip("This test races with cache eviction.") - const files = `-- a.go -- -package a - -func _() { - println("1") -} -` - c := cache.New(nil) - env := setupEnv(t, files, c) - env.Await(InitialWorkspaceLoad) - env.OpenFile("a.go") - - // Make a couple edits to stabilize cache state. - // - // For some reason, after only one edit we're left with two parsed files - // (perhaps because something had to ParseHeader). If this test proves flaky, - // we'll need to investigate exactly what is causing various parse modes to - // be present (or rewrite the test to be more tolerant, for example make ~100 - // modifications and assert that we're within a few of where we're started). - env.RegexpReplace("a.go", "1", "2") - env.RegexpReplace("a.go", "2", "3") - env.AfterChange() - - // Capture cache state, make an arbitrary change, and wait for gopls to do - // its work. 
Afterward, we should have the exact same number of parsed - before := c.MemStats() - env.RegexpReplace("a.go", "3", "4") - env.AfterChange() - after := c.MemStats() - - if diff := cmp.Diff(before, after); diff != "" { - t.Errorf("store objects differ after change (-before +after)\n%s", diff) - } -} - -// setupEnv creates a new sandbox environment for editing the txtar encoded -// content of files. It uses a new gopls instance backed by the Cache c. -func setupEnv(t *testing.T, files string, c *cache.Cache) *Env { - ctx := debug.WithInstance(context.Background(), "", "off") - server := lsprpc.NewStreamServer(c, false, hooks.Options) - ts := servertest.NewPipeServer(server, jsonrpc2.NewRawStream) - s, err := fake.NewSandbox(&fake.SandboxConfig{ - Files: fake.UnpackTxt(files), - }) - if err != nil { - t.Fatal(err) - } - - a := NewAwaiter(s.Workdir) - const skipApplyEdits = false - editor, err := fake.NewEditor(s, fake.EditorConfig{}).Connect(ctx, ts, a.Hooks(), skipApplyEdits) - if err != nil { - t.Fatal(err) - } - - return &Env{ - T: t, - Ctx: ctx, - Editor: editor, - Sandbox: s, - Awaiter: a, - } -} From ec543c5a2fb1b737d8c1e94c4daef3d518e5ebd4 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 22 May 2023 15:51:50 -0400 Subject: [PATCH 062/109] gopls/internal/lsp/cache: fix crash in Session.updateViewLocked The createView release function should not be called until we have finished using the view's overlays. Change-Id: I988c7f5e8fa8bc41108b491286501881b03a535f Reviewed-on: https://go-review.googlesource.com/c/tools/+/496884 Reviewed-by: Robert Findley Auto-Submit: Alan Donovan Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot --- gopls/internal/lsp/cache/session.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/gopls/internal/lsp/cache/session.go b/gopls/internal/lsp/cache/session.go index e13f4c8a474..8eae64e4d82 100644 --- a/gopls/internal/lsp/cache/session.go +++ b/gopls/internal/lsp/cache/session.go @@ -109,13 +109,14 @@ func (s *Session) NewView(ctx context.Context, name string, folder span.URI, opt // TODO(rfindley): clarify that createView can never be cancelled (with the // possible exception of server shutdown). +// On success, the caller becomes responsible for calling the release function once. func (s *Session) createView(ctx context.Context, name string, folder span.URI, options *source.Options, seqID uint64) (*View, *snapshot, func(), error) { index := atomic.AddInt64(&viewIndex, 1) // Get immutable workspace information. info, err := s.getWorkspaceInformation(ctx, folder, options) if err != nil { - return nil, nil, func() {}, err + return nil, nil, nil, err } gowork, _ := info.GOWORK() @@ -327,6 +328,15 @@ func (s *Session) updateViewLocked(ctx context.Context, view *View, options *sou } v, snapshot, release, err := s.createView(ctx, view.name, view.folder, options, seqID) + if err != nil { + // we have dropped the old view, but could not create the new one + // this should not happen and is very bad, but we still need to clean + // up the view array if it happens + s.views = removeElement(s.views, i) + return nil, err + } + defer release() + // The new snapshot has lost the history of the previous view. As a result, // it may not see open files that aren't in its build configuration (as it // would have done via didOpen notifications). 
This can lead to inconsistent @@ -336,15 +346,7 @@ func (s *Session) updateViewLocked(ctx context.Context, view *View, options *sou for _, o := range v.fs.Overlays() { _, _ = snapshot.ReadFile(ctx, o.URI()) } - release() - if err != nil { - // we have dropped the old view, but could not create the new one - // this should not happen and is very bad, but we still need to clean - // up the view array if it happens - s.views = removeElement(s.views, i) - return nil, err - } // substitute the new view into the array where the old view was s.views[i] = v return v, nil From 6997d196fdf47567ff25d193fd48ddd4470a9d7d Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 22 May 2023 15:23:18 -0400 Subject: [PATCH 063/109] gopls/internal/regtest/misc: unskip TestMajorOptionsChange This test was fixed in CL 494675, which forced snapshots to observe all overlays when updating the view. Updates golang/go#53878 Fixes golang/go#57934 Change-Id: I018bdd260255d6a630c7fc8788935fd69f5e7477 Reviewed-on: https://go-review.googlesource.com/c/tools/+/496883 TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan gopls-CI: kokoro Run-TryBot: Robert Findley --- gopls/internal/regtest/misc/configuration_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/gopls/internal/regtest/misc/configuration_test.go b/gopls/internal/regtest/misc/configuration_test.go index 6cbfe373e4a..853abcd7dff 100644 --- a/gopls/internal/regtest/misc/configuration_test.go +++ b/gopls/internal/regtest/misc/configuration_test.go @@ -57,9 +57,7 @@ var FooErr = errors.New("foo") // // Gopls should not get confused about buffer content when recreating the view. func TestMajorOptionsChange(t *testing.T) { - t.Skip("broken due to golang/go#57934") - - testenv.NeedsGo1Point(t, 17) + testenv.NeedsGo1Point(t, 19) // needs staticcheck const files = ` -- go.mod -- From a70f2bc213fb73930180d21edc3162030878803c Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 22 May 2023 16:57:52 -0400 Subject: [PATCH 064/109] gopls/internal/regtest/misc: update and unskip TestHoverIntLiteral This test is updated to exercise hover over literals, not vars, as was decided in golang/go#58220. Updates golang/go#53878 Change-Id: Ic70d3492f28580ebfea24ec08dc47b1ad385c2ff Reviewed-on: https://go-review.googlesource.com/c/tools/+/496885 Run-TryBot: Robert Findley Reviewed-by: Alan Donovan gopls-CI: kokoro Auto-Submit: Robert Findley TryBot-Result: Gopher Robot --- gopls/internal/regtest/misc/hover_test.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/gopls/internal/regtest/misc/hover_test.go b/gopls/internal/regtest/misc/hover_test.go index 24ee6d857d5..41c6529a7bf 100644 --- a/gopls/internal/regtest/misc/hover_test.go +++ b/gopls/internal/regtest/misc/hover_test.go @@ -84,12 +84,6 @@ func main() { } func TestHoverIntLiteral(t *testing.T) { - // TODO(rfindley): this behavior doesn't actually make sense for vars. It is - // misleading to format their value when it is (of course) variable. - // - // Instead, we should allow hovering on numeric literals. - t.Skip("golang/go#58220: broken due to new hover logic") - const source = ` -- main.go -- package main @@ -106,13 +100,13 @@ func main() { Run(t, source, func(t *testing.T, env *Env) { env.OpenFile("main.go") hexExpected := "58190" - got, _ := env.Hover(env.RegexpSearch("main.go", "hex")) + got, _ := env.Hover(env.RegexpSearch("main.go", "0xe")) if got != nil && !strings.Contains(got.Value, hexExpected) { t.Errorf("Hover: missing expected field '%s'. 
Got:\n%q", hexExpected, got.Value) } binExpected := "73" - got, _ = env.Hover(env.RegexpSearch("main.go", "bigBin")) + got, _ = env.Hover(env.RegexpSearch("main.go", "0b1")) if got != nil && !strings.Contains(got.Value, binExpected) { t.Errorf("Hover: missing expected field '%s'. Got:\n%q", binExpected, got.Value) } From 7a03febeeead7497d3821eb99461ecfeae51c0ba Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 23 May 2023 12:14:48 -0400 Subject: [PATCH 065/109] gopls/internal/lsp/cmd: remove vestiges of debugging golang/go#59475 Updates golang/go#59475 Change-Id: Ib775deeff902e2eba4a99fb970374063897da7df Reviewed-on: https://go-review.googlesource.com/c/tools/+/497396 Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Robert Findley Run-TryBot: Alan Donovan --- gopls/internal/lsp/cmd/cmd.go | 5 ----- gopls/internal/lsp/cmd/suggested_fix.go | 12 ------------ gopls/internal/lsp/cmd/test/integration_test.go | 5 +---- 3 files changed, 1 insertion(+), 21 deletions(-) diff --git a/gopls/internal/lsp/cmd/cmd.go b/gopls/internal/lsp/cmd/cmd.go index 02e135ae4e5..8c5556d95e0 100644 --- a/gopls/internal/lsp/cmd/cmd.go +++ b/gopls/internal/lsp/cmd/cmd.go @@ -518,11 +518,6 @@ func (c *cmdClient) ApplyEdit(ctx context.Context, p *protocol.ApplyWorkspaceEdi } func (c *cmdClient) PublishDiagnostics(ctx context.Context, p *protocol.PublishDiagnosticsParams) error { - var debug = os.Getenv(DebugSuggestedFixEnvVar) == "true" - if debug { - log.Printf("PublishDiagnostics URI=%v Diagnostics=%v", p.URI, p.Diagnostics) - } - if p.URI == "gopls://diagnostics-done" { close(c.diagnosticsDone) } diff --git a/gopls/internal/lsp/cmd/suggested_fix.go b/gopls/internal/lsp/cmd/suggested_fix.go index 1128688f970..2637517c50b 100644 --- a/gopls/internal/lsp/cmd/suggested_fix.go +++ b/gopls/internal/lsp/cmd/suggested_fix.go @@ -9,7 +9,6 @@ import ( "flag" "fmt" "io/ioutil" - "log" "os" "golang.org/x/tools/gopls/internal/lsp/protocol" @@ -42,16 +41,11 @@ fix-flags: printFlagDefaults(f) } -const DebugSuggestedFixEnvVar = "_DEBUG_SUGGESTED_FIX" - // Run performs diagnostic checks on the file specified and either; // - if -w is specified, updates the file in place; // - if -d is specified, prints out unified diffs of the changes; or // - otherwise, prints the new versions to stdout. func (s *suggestedFix) Run(ctx context.Context, args ...string) error { - // For debugging golang/go#59475, enable some additional output. - var debug = os.Getenv(DebugSuggestedFixEnvVar) == "true" - if len(args) < 1 { return tool.CommandLineErrorf("fix expects at least 1 argument") } @@ -80,9 +74,6 @@ func (s *suggestedFix) Run(ctx context.Context, args ...string) error { conn.Client.filesMu.Lock() diagnostics = append(diagnostics, file.diagnostics...) conn.Client.filesMu.Unlock() - if debug { - log.Printf("file diagnostics: %#v", diagnostics) - } // Request code actions codeActionKinds := []protocol.CodeActionKind{protocol.QuickFix} @@ -106,9 +97,6 @@ func (s *suggestedFix) Run(ctx context.Context, args ...string) error { if err != nil { return fmt.Errorf("%v: %v", from, err) } - if debug { - log.Printf("code actions: %#v", actions) - } // Gather edits from matching code actions. 
var edits []protocol.TextEdit diff --git a/gopls/internal/lsp/cmd/test/integration_test.go b/gopls/internal/lsp/cmd/test/integration_test.go index c95790c4d95..b2c9c29e2e1 100644 --- a/gopls/internal/lsp/cmd/test/integration_test.go +++ b/gopls/internal/lsp/cmd/test/integration_test.go @@ -890,10 +890,7 @@ func goplsWithEnv(t *testing.T, dir string, env []string, args ...string) *resul } goplsCmd := exec.Command(os.Args[0], args...) - goplsCmd.Env = append(os.Environ(), - "ENTRYPOINT=goplsMain", - fmt.Sprintf("%s=true", cmd.DebugSuggestedFixEnvVar), - ) + goplsCmd.Env = append(os.Environ(), "ENTRYPOINT=goplsMain") goplsCmd.Env = append(goplsCmd.Env, env...) goplsCmd.Dir = dir goplsCmd.Stdout = new(bytes.Buffer) From 5ce721db5afa2ba3f21262706618a56351407972 Mon Sep 17 00:00:00 2001 From: Sung Yoon Whang Date: Wed, 17 May 2023 15:52:02 +0000 Subject: [PATCH 066/109] gopls/doc: Fix broken links Stumbled upon several broken links while looking at the gopls docs implementation.md. This fixes those. Change-Id: I946a91de65e41fbf5387eea1c9ec93dcd742dd32 GitHub-Last-Rev: 1eeaf9b520708661c5b26de79c298744df8b5f56 GitHub-Pull-Request: golang/tools#437 Reviewed-on: https://go-review.googlesource.com/c/tools/+/495815 TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Suzy Mueller Run-TryBot: Suzy Mueller Reviewed-by: Matthew Dempsky Auto-Submit: Suzy Mueller --- gopls/doc/design/implementation.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/gopls/doc/design/implementation.md b/gopls/doc/design/implementation.md index 859ec1c1219..e9b915ba393 100644 --- a/gopls/doc/design/implementation.md +++ b/gopls/doc/design/implementation.md @@ -37,12 +37,12 @@ Package | Description [gopls]: https://github.com/golang/tools/tree/master/gopls [internal/jsonrpc2]: https://github.com/golang/tools/tree/master/internal/jsonrpc2 -[internal/lsp]: https://github.com/golang/tools/tree/master/internal/lsp -[internal/lsp/cache]: https://github.com/golang/tools/tree/master/internal/lsp/cache -[internal/lsp/cmd]: https://github.com/golang/tools/tree/master/internal/lsp/cmd -[internal/lsp/debug]: https://github.com/golang/tools/tree/master/internal/lsp/debug -[internal/lsp/protocol]: https://github.com/golang/tools/tree/master/internal/lsp/protocol -[internal/lsp/source]: https://github.com/golang/tools/tree/master/internal/lsp/source +[internal/lsp]: https://github.com/golang/tools/tree/master/gopls/internal/lsp +[internal/lsp/cache]: https://github.com/golang/tools/tree/master/gopls/internal/lsp/cache +[internal/lsp/cmd]: https://github.com/golang/tools/tree/master/gopls/internal/lsp/cmd +[internal/lsp/debug]: https://github.com/golang/tools/tree/master/gopls/internal/lsp/debug +[internal/lsp/protocol]: https://github.com/golang/tools/tree/master/gopls/internal/lsp/protocol +[internal/lsp/source]: https://github.com/golang/tools/tree/master/gopls/internal/lsp/source [internal/memoize]: https://github.com/golang/tools/tree/master/internal/memoize -[internal/span]: https://github.com/golang/tools/tree/master/internal/span +[internal/span]: https://github.com/golang/tools/tree/master/gopls/internal/span [x/tools]: https://github.com/golang/tools From 1e6066861fe87ec57ebafea5ae707de3e0f5cf86 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 22 May 2023 17:11:27 -0400 Subject: [PATCH 067/109] gopls/internal/regtest/workspace: unskip duplicate modules test Unskip and update TestBrokenWorkspace_DuplicateModules. 
Using go1.20+, the go command does surface errors about duplicate modules in go.work files. Fixes golang/go#57650 Change-Id: If7674525bb57bd84885682f42fc93406f8be85d2 Reviewed-on: https://go-review.googlesource.com/c/tools/+/496886 Run-TryBot: Robert Findley Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro --- gopls/internal/regtest/workspace/broken_test.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/gopls/internal/regtest/workspace/broken_test.go b/gopls/internal/regtest/workspace/broken_test.go index 005a7e94638..d7d54c4d139 100644 --- a/gopls/internal/regtest/workspace/broken_test.go +++ b/gopls/internal/regtest/workspace/broken_test.go @@ -23,10 +23,9 @@ import ( // Test for golang/go#53933 func TestBrokenWorkspace_DuplicateModules(t *testing.T) { - testenv.NeedsGo1Point(t, 18) - - // TODO(golang/go#57650): fix this feature. - t.Skip("we no longer detect duplicate modules") + // The go command error message was improved in Go 1.20 to mention multiple + // modules. + testenv.NeedsGo1Point(t, 20) // This proxy module content is replaced by the workspace, but is still // required for module resolution to function in the Go command. @@ -98,8 +97,8 @@ const CompleteMe = 222 ProxyFiles(proxy), ).Run(t, src, func(t *testing.T, env *Env) { env.OpenFile("package1/main.go") - env.Await( - OutstandingWork(lsp.WorkspaceLoadFailure, `found module "example.com/foo" multiple times in the workspace`), + env.AfterChange( + OutstandingWork(lsp.WorkspaceLoadFailure, `module example.com/foo appears multiple times in workspace`), ) // Remove the redundant vendored copy of example.com. @@ -110,10 +109,10 @@ const CompleteMe = 222 ./package2/vendor/example.com/foo ) `) - env.Await(NoOutstandingWork()) + env.AfterChange(NoOutstandingWork()) // Check that definitions in package1 go to the copy vendored in package2. - location := env.GoToDefinition(env.RegexpSearch("package1/main.go", "CompleteMe")).URI.SpanURI().Filename() + location := string(env.GoToDefinition(env.RegexpSearch("package1/main.go", "CompleteMe")).URI) const wantLocation = "package2/vendor/example.com/foo/foo.go" if !strings.HasSuffix(location, wantLocation) { t.Errorf("got definition of CompleteMe at %q, want %q", location, wantLocation) From 7e146a6c6f18a2b3041d3e5aa599422f8dfb365e Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 23 May 2023 14:25:24 -0400 Subject: [PATCH 068/109] gopls/internal/lsp/cmd: simplify connection type This change is a refactoring of cmd.connection so that all fields are set once at construction, the Client field is no longer exported, and the dead cmd.Client.Server field is removed. 
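With this change the in-process connection is assembled in a single pass:
construct the client and server, wire them into the connection, then
initialize. A sketch of the new shape (taken from Application.connect below):

    client := newClient(app, onProgress)
    server := lsp.NewServer(cache.NewSession(ctx, cache.New(nil), app.options), client)
    conn := newConnection(server, client)
    if err := conn.initialize(protocol.WithClient(ctx, client), app.options); err != nil {
        return nil, err
    }
    return conn, nil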
Change-Id: I7b0e8bde6cab37bff2db17415a13492a71b33fef Reviewed-on: https://go-review.googlesource.com/c/tools/+/497496 Reviewed-by: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan --- gopls/internal/lsp/cmd/capabilities_test.go | 16 +++--- gopls/internal/lsp/cmd/check.go | 4 +- gopls/internal/lsp/cmd/cmd.go | 60 ++++++++++++--------- gopls/internal/lsp/cmd/suggested_fix.go | 4 +- 4 files changed, 47 insertions(+), 37 deletions(-) diff --git a/gopls/internal/lsp/cmd/capabilities_test.go b/gopls/internal/lsp/cmd/capabilities_test.go index 39b60af2881..6d4e32f0fe2 100644 --- a/gopls/internal/lsp/cmd/capabilities_test.go +++ b/gopls/internal/lsp/cmd/capabilities_test.go @@ -41,17 +41,16 @@ func TestCapabilities(t *testing.T) { defer os.RemoveAll(tmpDir) app := New("gopls-test", tmpDir, os.Environ(), nil) - c := newConnection(app, nil) - ctx := context.Background() - defer c.terminate(ctx) params := &protocol.ParamInitialize{} - params.RootURI = protocol.URIFromPath(c.Client.app.wd) + params.RootURI = protocol.URIFromPath(app.wd) params.Capabilities.Workspace.Configuration = true // Send an initialize request to the server. - c.Server = lsp.NewServer(cache.NewSession(ctx, cache.New(nil), app.options), c.Client) - result, err := c.Server.Initialize(ctx, params) + ctx := context.Background() + client := newClient(app, nil) + server := lsp.NewServer(cache.NewSession(ctx, cache.New(nil), app.options), client) + result, err := server.Initialize(ctx, params) if err != nil { t.Fatal(err) } @@ -60,10 +59,13 @@ func TestCapabilities(t *testing.T) { t.Error(err) } // Complete initialization of server. - if err := c.Server.Initialized(ctx, &protocol.InitializedParams{}); err != nil { + if err := server.Initialized(ctx, &protocol.InitializedParams{}); err != nil { t.Fatal(err) } + c := newConnection(server, client) + defer c.terminate(ctx) + // Open the file on the server side. 
uri := protocol.URIFromPath(tmpFile) if err := c.Server.DidOpen(ctx, &protocol.DidOpenTextDocumentParams{ diff --git a/gopls/internal/lsp/cmd/check.go b/gopls/internal/lsp/cmd/check.go index f501c448109..a529f143884 100644 --- a/gopls/internal/lsp/cmd/check.go +++ b/gopls/internal/lsp/cmd/check.go @@ -57,8 +57,8 @@ func (c *check) Run(ctx context.Context, args ...string) error { if err := conn.diagnoseFiles(ctx, uris); err != nil { return err } - conn.Client.filesMu.Lock() - defer conn.Client.filesMu.Unlock() + conn.client.filesMu.Lock() + defer conn.client.filesMu.Unlock() for _, file := range checking { for _, d := range file.diagnostics { diff --git a/gopls/internal/lsp/cmd/cmd.go b/gopls/internal/lsp/cmd/cmd.go index 8c5556d95e0..0bd636c27d7 100644 --- a/gopls/internal/lsp/cmd/cmd.go +++ b/gopls/internal/lsp/cmd/cmd.go @@ -293,10 +293,14 @@ var ( func (app *Application) connect(ctx context.Context, onProgress func(*protocol.ProgressParams)) (*connection, error) { switch { case app.Remote == "": - connection := newConnection(app, onProgress) - connection.Server = lsp.NewServer(cache.NewSession(ctx, cache.New(nil), app.options), connection.Client) - ctx = protocol.WithClient(ctx, connection.Client) - return connection, connection.initialize(ctx, app.options) + client := newClient(app, onProgress) + server := lsp.NewServer(cache.NewSession(ctx, cache.New(nil), app.options), client) + conn := newConnection(server, client) + if err := conn.initialize(protocol.WithClient(ctx, client), app.options); err != nil { + return nil, err + } + return conn, nil + case strings.HasPrefix(app.Remote, "internal@"): internalMu.Lock() defer internalMu.Unlock() @@ -331,19 +335,19 @@ func CloseTestConnections(ctx context.Context) { } func (app *Application) connectRemote(ctx context.Context, remote string) (*connection, error) { - connection := newConnection(app, nil) conn, err := lsprpc.ConnectToRemote(ctx, remote) if err != nil { return nil, err } stream := jsonrpc2.NewHeaderStream(conn) cc := jsonrpc2.NewConn(stream) - connection.Server = protocol.ServerDispatcher(cc) - ctx = protocol.WithClient(ctx, connection.Client) + server := protocol.ServerDispatcher(cc) + client := newClient(app, nil) + connection := newConnection(server, client) + ctx = protocol.WithClient(ctx, connection.client) cc.Go(ctx, protocol.Handlers( - protocol.ClientHandler(connection.Client, - jsonrpc2.MethodNotFound))) + protocol.ClientHandler(client, jsonrpc2.MethodNotFound))) return connection, connection.initialize(ctx, app.options) } @@ -355,7 +359,7 @@ var matcherString = map[source.SymbolMatcher]string{ func (c *connection) initialize(ctx context.Context, options func(*source.Options)) error { params := &protocol.ParamInitialize{} - params.RootURI = protocol.URIFromPath(c.Client.app.wd) + params.RootURI = protocol.URIFromPath(c.client.app.wd) params.Capabilities.Workspace.Configuration = true // Make sure to respect configured options when sending initialize request. @@ -377,7 +381,7 @@ func (c *connection) initialize(ctx context.Context, options func(*source.Option // If the subcommand has registered a progress handler, report the progress // capability. 
- if c.Client.onProgress != nil { + if c.client.onProgress != nil { params.Capabilities.Window.WorkDoneProgress = true } @@ -395,11 +399,10 @@ func (c *connection) initialize(ctx context.Context, options func(*source.Option type connection struct { protocol.Server - Client *cmdClient + client *cmdClient } type cmdClient struct { - protocol.Server app *Application onProgress func(*protocol.ProgressParams) @@ -417,13 +420,18 @@ type cmdFile struct { diagnostics []protocol.Diagnostic } -func newConnection(app *Application, onProgress func(*protocol.ProgressParams)) *connection { +func newClient(app *Application, onProgress func(*protocol.ProgressParams)) *cmdClient { + return &cmdClient{ + app: app, + onProgress: onProgress, + files: make(map[span.URI]*cmdFile), + } +} + +func newConnection(server protocol.Server, client *cmdClient) *connection { return &connection{ - Client: &cmdClient{ - app: app, - onProgress: onProgress, - files: make(map[span.URI]*cmdFile), - }, + Server: server, + client: client, } } @@ -611,7 +619,7 @@ func (c *cmdClient) openFile(ctx context.Context, uri span.URI) *cmdFile { // - map a (URI, protocol.Range) to a MappedRange; // - parse a command-line argument to a MappedRange. func (c *connection) openFile(ctx context.Context, uri span.URI) (*cmdFile, error) { - file := c.Client.openFile(ctx, uri) + file := c.client.openFile(ctx, uri) if file.err != nil { return nil, file.err } @@ -646,22 +654,22 @@ func (c *connection) diagnoseFiles(ctx context.Context, files []span.URI) error for _, file := range files { untypedFiles = append(untypedFiles, string(file)) } - c.Client.diagnosticsMu.Lock() - defer c.Client.diagnosticsMu.Unlock() + c.client.diagnosticsMu.Lock() + defer c.client.diagnosticsMu.Unlock() - c.Client.diagnosticsDone = make(chan struct{}) + c.client.diagnosticsDone = make(chan struct{}) _, err := c.Server.NonstandardRequest(ctx, "gopls/diagnoseFiles", map[string]interface{}{"files": untypedFiles}) if err != nil { - close(c.Client.diagnosticsDone) + close(c.client.diagnosticsDone) return err } - <-c.Client.diagnosticsDone + <-c.client.diagnosticsDone return nil } func (c *connection) terminate(ctx context.Context) { - if strings.HasPrefix(c.Client.app.Remote, "internal@") { + if strings.HasPrefix(c.client.app.Remote, "internal@") { // internal connections need to be left alive for the next test return } diff --git a/gopls/internal/lsp/cmd/suggested_fix.go b/gopls/internal/lsp/cmd/suggested_fix.go index 2637517c50b..169d6d15cd0 100644 --- a/gopls/internal/lsp/cmd/suggested_fix.go +++ b/gopls/internal/lsp/cmd/suggested_fix.go @@ -71,9 +71,9 @@ func (s *suggestedFix) Run(ctx context.Context, args ...string) error { return err } diagnostics := []protocol.Diagnostic{} // LSP wants non-nil slice - conn.Client.filesMu.Lock() + conn.client.filesMu.Lock() diagnostics = append(diagnostics, file.diagnostics...) 
- conn.Client.filesMu.Unlock() + conn.client.filesMu.Unlock() // Request code actions codeActionKinds := []protocol.CodeActionKind{protocol.QuickFix} From 5dc3f7433cf12df0d712efc2086441a28232ddce Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 24 May 2023 10:58:11 -0400 Subject: [PATCH 069/109] gopls/internal/lsp/filecache: reenable memory cache layer Change-Id: I6814cb365f81e740e98cd9fa0c723256b69141aa Reviewed-on: https://go-review.googlesource.com/c/tools/+/497955 Reviewed-by: Robert Findley TryBot-Result: Gopher Robot gopls-CI: kokoro Auto-Submit: Alan Donovan Run-TryBot: Alan Donovan --- gopls/internal/lsp/filecache/filecache.go | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go index 140ae97c3f5..d92721fe8c4 100644 --- a/gopls/internal/lsp/filecache/filecache.go +++ b/gopls/internal/lsp/filecache/filecache.go @@ -59,8 +59,6 @@ type memKey struct { key [32]byte } -const useMemCache = false // disabled for now while we debug the new file-based implementation - // Get retrieves from the cache and returns a newly allocated // copy of the value most recently supplied to Set(kind, key), // possibly by another process. @@ -69,10 +67,8 @@ func Get(kind string, key [32]byte) ([]byte, error) { // First consult the read-through memory cache. // Note that memory cache hits do not update the times // used for LRU eviction of the file-based cache. - if useMemCache { - if value := memCache.Get(memKey{kind, key}); value != nil { - return value.([]byte), nil - } + if value := memCache.Get(memKey{kind, key}); value != nil { + return value.([]byte), nil } iolimit <- struct{}{} // acquire a token @@ -130,9 +126,7 @@ func Get(kind string, key [32]byte) ([]byte, error) { touch(indexName) touch(casName) - if useMemCache { - memCache.Set(memKey{kind, key}, value, len(value)) - } + memCache.Set(memKey{kind, key}, value, len(value)) return value, nil } @@ -143,9 +137,7 @@ var ErrNotFound = fmt.Errorf("not found") // Set updates the value in the cache. func Set(kind string, key [32]byte, value []byte) error { - if useMemCache { - memCache.Set(memKey{kind, key}, value, len(value)) - } + memCache.Set(memKey{kind, key}, value, len(value)) iolimit <- struct{}{} // acquire a token defer func() { <-iolimit }() // release a token From e106694df63613d93fd74027bcded961c4f3324c Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 23 May 2023 13:27:17 -0400 Subject: [PATCH 070/109] gopls/internal/lsp: bundle certain quick-fixes with their diagnostic To pragmatically avoid re-diagnosing the entire workspace, we can bundle quick-fixes directly with their corresponding diagnostic, using the Diagnostic.Data field added for this purpose in version 3.16 of the LSP spec. We should use this mechanism more generally, but for fixes with edits we'd have to be careful that the edits are still valid in the current snapshot. For now, be surgical. This is the final regression we're tracking in the incremental gopls issue (golang/go#57987). 
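Concretely, the fixes travel as a JSON-serialized list of command-only code
actions in the diagnostic's Data field, and the codeAction handler recovers
them with BundledQuickFixes instead of re-running diagnostics. A sketch of the
payload (see source/diagnostics.go below):

    // quickFixesJSON is the payload stored in protocol.Diagnostic.Data.
    type quickFixesJSON struct {
        Fixes []protocol.CodeAction // commands only; fixes that carry edits are not bundled
    }

Fixes carrying workspace edits are not bundled, since those edits could be
stale by the time the client requests code actions; such diagnostics fall back
to the existing diagnostic-matching logic.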
Fixes golang/go#57987 Change-Id: Iaca91484e90341d677ecf573944edffef6e07255 Reviewed-on: https://go-review.googlesource.com/c/tools/+/497398 TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Robert Findley Reviewed-by: Alan Donovan --- gopls/internal/lsp/cache/check.go | 16 +++- gopls/internal/lsp/code_action.go | 11 +++ gopls/internal/lsp/diagnostics.go | 4 + .../internal/lsp/protocol/generate/tables.go | 1 + gopls/internal/lsp/protocol/tsprotocol.go | 2 +- gopls/internal/lsp/source/diagnostics.go | 80 +++++++++++++++++++ gopls/internal/lsp/source/view.go | 14 +++- .../internal/regtest/modfile/modfile_test.go | 19 ++--- 8 files changed, 128 insertions(+), 19 deletions(-) diff --git a/gopls/internal/lsp/cache/check.go b/gopls/internal/lsp/cache/check.go index cf212c6e2e0..663127001e3 100644 --- a/gopls/internal/lsp/cache/check.go +++ b/gopls/internal/lsp/cache/check.go @@ -1542,14 +1542,18 @@ func depsErrors(ctx context.Context, m *source.Metadata, meta *metadataGraph, fs if err != nil { return nil, err } - errors = append(errors, &source.Diagnostic{ + diag := &source.Diagnostic{ URI: imp.cgf.URI, Range: rng, Severity: protocol.SeverityError, Source: source.TypeError, Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err), SuggestedFixes: fixes, - }) + } + if !source.BundleQuickFixes(diag) { + bug.Reportf("failed to bundle fixes for diagnostic %q", diag.Message) + } + errors = append(errors, diag) } } } @@ -1585,14 +1589,18 @@ func depsErrors(ctx context.Context, m *source.Metadata, meta *metadataGraph, fs if err != nil { return nil, err } - errors = append(errors, &source.Diagnostic{ + diag := &source.Diagnostic{ URI: pm.URI, Range: rng, Severity: protocol.SeverityError, Source: source.TypeError, Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err), SuggestedFixes: fixes, - }) + } + if !source.BundleQuickFixes(diag) { + bug.Reportf("failed to bundle fixes for diagnostic %q", diag.Message) + } + errors = append(errors, diag) break } } diff --git a/gopls/internal/lsp/code_action.go b/gopls/internal/lsp/code_action.go index 8658ba5588b..8e817b88bb6 100644 --- a/gopls/internal/lsp/code_action.go +++ b/gopls/internal/lsp/code_action.go @@ -473,6 +473,17 @@ func documentChanges(fh source.FileHandle, edits []protocol.TextEdit) []protocol func codeActionsMatchingDiagnostics(ctx context.Context, snapshot source.Snapshot, pdiags []protocol.Diagnostic, sdiags []*source.Diagnostic) ([]protocol.CodeAction, error) { var actions []protocol.CodeAction + var unbundled []protocol.Diagnostic // diagnostics without bundled code actions in their Data field + for _, pd := range pdiags { + bundled := source.BundledQuickFixes(pd) + if len(bundled) > 0 { + actions = append(actions, bundled...) + } else { + // No bundled actions: keep searching for a match. 
+ unbundled = append(unbundled, pd) + } + } + for _, sd := range sdiags { var diag *protocol.Diagnostic for _, pd := range pdiags { diff --git a/gopls/internal/lsp/diagnostics.go b/gopls/internal/lsp/diagnostics.go index 90c22321c69..88008d319bc 100644 --- a/gopls/internal/lsp/diagnostics.go +++ b/gopls/internal/lsp/diagnostics.go @@ -145,6 +145,9 @@ func computeDiagnosticHash(diags ...*source.Diagnostic) string { fmt.Fprintf(h, "range: %s\n", d.Range) fmt.Fprintf(h, "severity: %s\n", d.Severity) fmt.Fprintf(h, "source: %s\n", d.Source) + if d.BundledFixes != nil { + fmt.Fprintf(h, "fixes: %s\n", *d.BundledFixes) + } } return fmt.Sprintf("%x", h.Sum(nil)) } @@ -771,6 +774,7 @@ func toProtocolDiagnostics(diagnostics []*source.Diagnostic) []protocol.Diagnost Source: string(diag.Source), Tags: emptySliceDiagnosticTag(diag.Tags), RelatedInformation: diag.Related, + Data: diag.BundledFixes, } if diag.Code != "" { pdiag.Code = diag.Code diff --git a/gopls/internal/lsp/protocol/generate/tables.go b/gopls/internal/lsp/protocol/generate/tables.go index 126301a05ff..8fb9707e4a1 100644 --- a/gopls/internal/lsp/protocol/generate/tables.go +++ b/gopls/internal/lsp/protocol/generate/tables.go @@ -68,6 +68,7 @@ var renameProp = map[prop]string{ {"Command", "arguments"}: "[]json.RawMessage", {"CompletionItem", "textEdit"}: "TextEdit", {"Diagnostic", "code"}: "interface{}", + {"Diagnostic", "data"}: "json.RawMessage", // delay unmarshalling quickfixes {"DocumentDiagnosticReportPartialResult", "relatedDocuments"}: "map[DocumentURI]interface{}", diff --git a/gopls/internal/lsp/protocol/tsprotocol.go b/gopls/internal/lsp/protocol/tsprotocol.go index 8469aeb4fbd..f8ebb468cef 100644 --- a/gopls/internal/lsp/protocol/tsprotocol.go +++ b/gopls/internal/lsp/protocol/tsprotocol.go @@ -896,7 +896,7 @@ type Diagnostic struct { // line 8525 // notification and `textDocument/codeAction` request. // // @since 3.16.0 - Data interface{} `json:"data,omitempty"` + Data *json.RawMessage `json:"data,omitempty"` } // Client capabilities specific to diagnostic pull requests. diff --git a/gopls/internal/lsp/source/diagnostics.go b/gopls/internal/lsp/source/diagnostics.go index fc08dcfa14c..336e35b25ac 100644 --- a/gopls/internal/lsp/source/diagnostics.go +++ b/gopls/internal/lsp/source/diagnostics.go @@ -6,7 +6,9 @@ package source import ( "context" + "encoding/json" + "golang.org/x/tools/gopls/internal/bug" "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/gopls/internal/span" ) @@ -136,3 +138,81 @@ func CombineDiagnostics(tdiags []*Diagnostic, adiags []*Diagnostic, outT, outA * *outT = append(*outT, tdiags...) } + +// quickFixesJSON is a JSON-serializable list of quick fixes +// to be saved in the protocol.Diagnostic.Data field. +type quickFixesJSON struct { + // TODO(rfindley): pack some sort of identifier here for later + // lookup/validation? + Fixes []protocol.CodeAction +} + +// BundleQuickFixes attempts to bundle sd.SuggestedFixes into the +// sd.BundledFixes field, so that it can be round-tripped through the client. +// It returns false if the quick-fixes cannot be bundled. +func BundleQuickFixes(sd *Diagnostic) bool { + if len(sd.SuggestedFixes) == 0 { + return true + } + var actions []protocol.CodeAction + for _, fix := range sd.SuggestedFixes { + if fix.Edits != nil { + // For now, we only support bundled code actions that execute commands. + // + // In order to cleanly support bundled edits, we'd have to guarantee that + // the edits were generated on the current snapshot. 
But this naively + // implies that every fix would have to include a snapshot ID, which + // would require us to republish all diagnostics on each new snapshot. + // + // TODO(rfindley): in order to avoid this additional chatter, we'd need + // to build some sort of registry or other mechanism on the snapshot to + // check whether a diagnostic is still valid. + return false + } + action := protocol.CodeAction{ + Title: fix.Title, + Kind: fix.ActionKind, + Command: fix.Command, + } + actions = append(actions, action) + } + fixes := quickFixesJSON{ + Fixes: actions, + } + data, err := json.Marshal(fixes) + if err != nil { + bug.Reportf("marshalling quick fixes: %v", err) + return false + } + msg := json.RawMessage(data) + sd.BundledFixes = &msg + return true +} + +// BundledQuickFixes extracts any bundled codeActions from the +// diag.Data field. +func BundledQuickFixes(diag protocol.Diagnostic) []protocol.CodeAction { + if diag.Data == nil { + return nil + } + var fix quickFixesJSON + if err := json.Unmarshal(*diag.Data, &fix); err != nil { + bug.Reportf("unmarshalling quick fix: %v", err) + return nil + } + + var actions []protocol.CodeAction + for _, action := range fix.Fixes { + // See BundleQuickFixes: for now we only support bundling commands. + if action.Edit != nil { + bug.Reportf("bundled fix %q includes workspace edits", action.Title) + continue + } + // associate the action with the incoming diagnostic + // (Note that this does not mutate the fix.Fixes slice). + action.Diagnostics = []protocol.Diagnostic{diag} + actions = append(actions, action) + } + + return actions +} diff --git a/gopls/internal/lsp/source/view.go b/gopls/internal/lsp/source/view.go index 6dd3811a0a5..2f7a3f27ef2 100644 --- a/gopls/internal/lsp/source/view.go +++ b/gopls/internal/lsp/source/view.go @@ -8,6 +8,7 @@ import ( "bytes" "context" "crypto/sha256" + "encoding/json" "errors" "fmt" "go/ast" @@ -971,7 +972,18 @@ type Diagnostic struct { Related []protocol.DiagnosticRelatedInformation // Fields below are used internally to generate quick fixes. They aren't - // part of the LSP spec and don't leave the server. + // part of the LSP spec and historically didn't leave the server. + // + // Update(2023-05): version 3.16 of the LSP spec included support for the + // Diagnostic.data field, which holds arbitrary data preserved in the + // diagnostic for codeAction requests. This field allows bundling additional + // information for quick-fixes, and gopls can (and should) use this + // information to avoid re-evaluating diagnostics in code-action handlers. + // + // In order to stage this transition incrementally, the 'BundledFixes' field + // may store a 'bundled' (=json-serialized) form of the associated + // SuggestedFixes. Not all diagnostics have their fixes bundled. + BundledFixes *json.RawMessage SuggestedFixes []SuggestedFix } diff --git a/gopls/internal/regtest/modfile/modfile_test.go b/gopls/internal/regtest/modfile/modfile_test.go index 03e60ac80e7..855141a7b30 100644 --- a/gopls/internal/regtest/modfile/modfile_test.go +++ b/gopls/internal/regtest/modfile/modfile_test.go @@ -498,14 +498,8 @@ var _ = blah.Name ReadDiagnostics("a/go.mod", &modDiags), ) - // golang.go#57987: now that gopls is incremental, we must be careful where - // we request diagnostics. We must design a simpler way to correlate - // published diagnostics with subsequent code action requests (see also the - // comment in Server.codeAction). 
- const canRequestCodeActionsForWorkspaceDiagnostics = false - if canRequestCodeActionsForWorkspaceDiagnostics { - env.ApplyQuickFixes("a/go.mod", modDiags.Diagnostics) - const want = `module mod.com + env.ApplyQuickFixes("a/go.mod", modDiags.Diagnostics) + const want = `module mod.com go 1.12 @@ -514,11 +508,10 @@ require ( example.com/blah/v2 v2.0.0 ) ` - env.SaveBuffer("a/go.mod") - env.AfterChange(NoDiagnostics(ForFile("a/main.go"))) - if got := env.BufferText("a/go.mod"); got != want { - t.Fatalf("suggested fixes failed:\n%s", compare.Text(want, got)) - } + env.SaveBuffer("a/go.mod") + env.AfterChange(NoDiagnostics(ForFile("a/main.go"))) + if got := env.BufferText("a/go.mod"); got != want { + t.Fatalf("suggested fixes failed:\n%s", compare.Text(want, got)) } }) } From d44a094d8021450e7930248d50c3f1dcaf093cdb Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 23 May 2023 21:50:38 -0400 Subject: [PATCH 071/109] gopls/internal/lsp/cmd: add a stats -anon flag to show anonymous data Also alter Bug storage to serialize bugs as JSON. Change-Id: I58ef96181b6c233333d1dfff39f1587f3cc9dd35 Reviewed-on: https://go-review.googlesource.com/c/tools/+/497755 Run-TryBot: Robert Findley gopls-CI: kokoro Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot --- gopls/internal/bug/bug.go | 15 ++-- gopls/internal/bug/bug_test.go | 27 ++++++++ gopls/internal/lsp/cmd/stats.go | 68 ++++++++++++++----- .../internal/lsp/cmd/test/integration_test.go | 12 ++++ gopls/internal/lsp/cmd/usage/stats.hlp | 8 ++- gopls/internal/lsp/filecache/filecache.go | 21 ++++-- 6 files changed, 123 insertions(+), 28 deletions(-) diff --git a/gopls/internal/bug/bug.go b/gopls/internal/bug/bug.go index 1bf7d30a781..f72948bec87 100644 --- a/gopls/internal/bug/bug.go +++ b/gopls/internal/bug/bug.go @@ -17,6 +17,7 @@ import ( "runtime/debug" "sort" "sync" + "time" ) // PanicOnBugs controls whether to panic when bugs are reported. @@ -32,12 +33,15 @@ var ( // A Bug represents an unexpected event or broken invariant. They are used for // capturing metadata that helps us understand the event. +// +// Bugs are JSON-serializable. type Bug struct { - File string // file containing the call to bug.Report - Line int // line containing the call to bug.Report - Description string // description of the bug - Key string // key identifying the bug (file:line if available) - Stack string // call stack + File string // file containing the call to bug.Report + Line int // line containing the call to bug.Report + Description string // description of the bug + Key string // key identifying the bug (file:line if available) + Stack string // call stack + AtTime time.Time // time the bug was reported } // Reportf reports a formatted bug message. 
@@ -77,6 +81,7 @@ func report(description string) { Description: description, Key: key, Stack: string(debug.Stack()), + AtTime: time.Now(), } mu.Lock() diff --git a/gopls/internal/bug/bug_test.go b/gopls/internal/bug/bug_test.go index 2e36221438c..8ca2aa5fd64 100644 --- a/gopls/internal/bug/bug_test.go +++ b/gopls/internal/bug/bug_test.go @@ -5,8 +5,12 @@ package bug import ( + "encoding/json" "fmt" "testing" + "time" + + "github.com/google/go-cmp/cmp" ) func resetForTesting() { @@ -62,3 +66,26 @@ func TestBugHandler(t *testing.T) { t.Errorf("got %q, want %q", got, want) } } + +func TestBugJSON(t *testing.T) { + b1 := Bug{ + File: "foo.go", + Line: 1, + Description: "a bug", + Key: "foo.go:1", + Stack: "", + AtTime: time.Now(), + } + + data, err := json.Marshal(b1) + if err != nil { + t.Fatal(err) + } + var b2 Bug + if err := json.Unmarshal(data, &b2); err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(b1, b2); diff != "" { + t.Errorf("bugs differ after JSON Marshal/Unmarshal (-b1 +b2):\n%s", diff) + } +} diff --git a/gopls/internal/lsp/cmd/stats.go b/gopls/internal/lsp/cmd/stats.go index f0d2f6db560..a681c5c0642 100644 --- a/gopls/internal/lsp/cmd/stats.go +++ b/gopls/internal/lsp/cmd/stats.go @@ -9,9 +9,11 @@ import ( "encoding/json" "flag" "fmt" + "go/token" "io/fs" "os" "path/filepath" + "reflect" "runtime" "strings" "sync" @@ -24,10 +26,13 @@ import ( "golang.org/x/tools/gopls/internal/lsp/filecache" "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/gopls/internal/lsp/source" + "golang.org/x/tools/internal/event" ) type stats struct { app *Application + + Anon bool `flag:"anon" help:"hide any fields that may contain user names, file names, or source code"` } func (s *stats) Name() string { return "stats" } @@ -41,14 +46,17 @@ Load the workspace for the current directory, and output a JSON summary of workspace information relevant to performance. As a side effect, this command populates the gopls file cache for the current workspace. +By default, this command may include output that refers to the location or +content of user code. When the -anon flag is set, fields that may refer to user +code are hidden. + Example: - $ gopls stats + $ gopls stats -anon `) printFlagDefaults(f) } func (s *stats) Run(ctx context.Context, args ...string) error { - // This undocumented environment variable allows // the cmd integration test to trigger a call to bug.Report. if msg := os.Getenv("TEST_GOPLS_BUG"); msg != "" { @@ -65,6 +73,10 @@ func (s *stats) Run(ctx context.Context, args ...string) error { return fmt.Errorf("the stats subcommand does not work with -remote") } + if !s.app.Verbose { + event.SetExporter(nil) // don't log errors to stderr + } + stats := GoplsStats{ GOOS: runtime.GOOS, GOARCH: runtime.GOARCH, @@ -139,12 +151,10 @@ func (s *stats) Run(ctx context.Context, args ...string) error { // Gather bug reports produced by any process using // this executable and persisted in the cache. 
- stats.BugReports = []string{} // non-nil for JSON do("Gathering bug reports", func() error { - cacheDir, reports := filecache.BugReports() - stats.CacheDir = cacheDir - for _, report := range reports { - stats.BugReports = append(stats.BugReports, string(report)) + stats.CacheDir, stats.BugReports = filecache.BugReports() + if stats.BugReports == nil { + stats.BugReports = []goplsbug.Bug{} // non-nil for JSON } return nil }) @@ -186,25 +196,51 @@ func (s *stats) Run(ctx context.Context, args ...string) error { return err } - data, err := json.MarshalIndent(stats, "", " ") + // Filter JSON output to fields that are consistent with s.Anon. + okFields := make(map[string]interface{}) + { + v := reflect.ValueOf(stats) + t := v.Type() + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !token.IsExported(f.Name) { + continue + } + if s.Anon && f.Tag.Get("anon") != "ok" { + // Fields that can be served with -anon must be explicitly marked as OK. + continue + } + vf := v.FieldByName(f.Name) + okFields[f.Name] = vf.Interface() + } + } + data, err := json.MarshalIndent(okFields, "", " ") if err != nil { return err } + os.Stdout.Write(data) fmt.Println() return nil } +// GoplsStats holds information extracted from a gopls session in the current +// workspace. +// +// Fields that should be printed with the -anon flag should be explicitly +// marked as `anon:"ok"`. Only fields that cannot refer to user files or code +// should be marked as such. type GoplsStats struct { - GOOS, GOARCH, GOPLSCACHE string - GoVersion string - GoplsVersion string - InitialWorkspaceLoadDuration string // in time.Duration string form + GOOS, GOARCH string `anon:"ok"` + GOPLSCACHE string + GoVersion string `anon:"ok"` + GoplsVersion string `anon:"ok"` + InitialWorkspaceLoadDuration string `anon:"ok"` // in time.Duration string form CacheDir string - BugReports []string - MemStats command.MemStatsResult - WorkspaceStats command.WorkspaceStatsResult - DirStats dirStats + BugReports []goplsbug.Bug + MemStats command.MemStatsResult `anon:"ok"` + WorkspaceStats command.WorkspaceStatsResult `anon:"ok"` + DirStats dirStats `anon:"ok"` } type dirStats struct { diff --git a/gopls/internal/lsp/cmd/test/integration_test.go b/gopls/internal/lsp/cmd/test/integration_test.go index b2c9c29e2e1..52ecb239035 100644 --- a/gopls/internal/lsp/cmd/test/integration_test.go +++ b/gopls/internal/lsp/cmd/test/integration_test.go @@ -756,6 +756,18 @@ package foo } } + // Check that -anon suppresses fields containing user information. + { + res2 := gopls(t, tree, "stats", "-anon") + res2.checkExit(true) + var stats2 cmd.GoplsStats + if err := json.Unmarshal([]byte(res2.stdout), &stats2); err != nil { + t.Fatalf("failed to unmarshal JSON output of stats command: %v", err) + } + if got := len(stats2.BugReports); got > 0 { + t.Errorf("Got %d bug reports with -anon, want 0. Reports:%+v", got, stats2.BugReports) + } + } } // TestFix tests the 'fix' subcommand (../suggested_fix.go). diff --git a/gopls/internal/lsp/cmd/usage/stats.hlp b/gopls/internal/lsp/cmd/usage/stats.hlp index 7694e291cab..71cce07c008 100644 --- a/gopls/internal/lsp/cmd/usage/stats.hlp +++ b/gopls/internal/lsp/cmd/usage/stats.hlp @@ -7,5 +7,11 @@ Load the workspace for the current directory, and output a JSON summary of workspace information relevant to performance. As a side effect, this command populates the gopls file cache for the current workspace. +By default, this command may include output that refers to the location or +content of user code. 
When the -anon flag is set, fields that may refer to user +code are hidden. + Example: - $ gopls stats + $ gopls stats -anon + -anon + hide any fields that may contain user names, file names, or source code diff --git a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go index d92721fe8c4..df84693d24d 100644 --- a/gopls/internal/lsp/filecache/filecache.go +++ b/gopls/internal/lsp/filecache/filecache.go @@ -24,6 +24,7 @@ import ( "bytes" "crypto/sha256" "encoding/hex" + "encoding/json" "errors" "fmt" "io" @@ -533,9 +534,13 @@ func init() { // Wait for cache init (bugs in tests happen early). _, _ = getCacheDir() - value := []byte(fmt.Sprintf("%s: %+v", time.Now().Format(time.RFC3339), bug)) - key := sha256.Sum256(value) - _ = Set(bugKind, key, value) + data, err := json.Marshal(bug) + if err != nil { + panic(fmt.Sprintf("error marshalling bug %+v: %v", bug, err)) + } + + key := sha256.Sum256(data) + _ = Set(bugKind, key, data) }) } @@ -543,7 +548,7 @@ func init() { // of all cached bug reports produced by this executable. // It also returns the location of the cache directory // used by this process (or "" on initialization error). -func BugReports() (string, [][]byte) { +func BugReports() (string, []bug.Bug) { // To test this logic, run: // $ TEST_GOPLS_BUG=oops gopls stats # trigger a bug // $ gopls stats # list the bugs @@ -552,7 +557,7 @@ func BugReports() (string, [][]byte) { if err != nil { return "", nil // ignore initialization errors } - var result [][]byte + var result []bug.Bug _ = filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error { if err != nil { return nil // ignore readdir/stat errors @@ -566,7 +571,11 @@ func BugReports() (string, [][]byte) { } content, err := Get(bugKind, key) if err == nil { // ignore read errors - result = append(result, content) + var b bug.Bug + if err := json.Unmarshal(content, &b); err != nil { + log.Printf("error marshalling bug %q: %v", string(content), err) + } + result = append(result, b) } } return nil From 9c97539a2c12436a124d693a2874ddbdcfa32f2e Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Wed, 24 May 2023 11:02:49 -0400 Subject: [PATCH 072/109] gopls/internal/lsp/cache: remove nested module warning The "you are working in a nested module" warning is often misleading, and is now redundant with the more accurate orphaned file diagnostics added in https://go.dev/cl/494675. Remove it. Change-Id: I22b506de914702adea98449f5e166a6dff06e045 Reviewed-on: https://go-review.googlesource.com/c/tools/+/497956 TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan Run-TryBot: Robert Findley gopls-CI: kokoro --- gopls/internal/lsp/cache/load.go | 47 ------------------- .../regtest/diagnostics/diagnostics_test.go | 3 +- .../regtest/workspace/workspace_test.go | 4 +- 3 files changed, 3 insertions(+), 51 deletions(-) diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go index 111b0743cb9..939d084492c 100644 --- a/gopls/internal/lsp/cache/load.go +++ b/gopls/internal/lsp/cache/load.go @@ -378,53 +378,6 @@ https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.` return fmt.Errorf(msg), s.applyCriticalErrorToFiles(ctx, msg, openFiles) } - // If the user has one active go.mod file, they may still be editing files - // in nested modules. Check the module of each open file and add warnings - // that the nested module must be opened as a workspace folder. - if len(s.workspaceModFiles) == 1 { - // Get the active root go.mod file to compare against. 
- var rootMod string - for uri := range s.workspaceModFiles { - rootMod = uri.Filename() - } - rootDir := filepath.Dir(rootMod) - nestedModules := make(map[string][]*Overlay) - for _, fh := range openFiles { - mod, err := findRootPattern(ctx, filepath.Dir(fh.URI().Filename()), "go.mod", s) - if err != nil { - if ctx.Err() != nil { - return ctx.Err(), nil - } - continue - } - if mod == "" { - continue - } - if mod != rootMod && source.InDir(rootDir, mod) { - modDir := filepath.Dir(mod) - nestedModules[modDir] = append(nestedModules[modDir], fh) - } - } - var multiModuleMsg string - if s.view.goversion >= 18 { - multiModuleMsg = `To work on multiple modules at once, please use a go.work file. -See https://github.com/golang/tools/blob/master/gopls/doc/workspace.md for more information on using workspaces.` - } else { - multiModuleMsg = `To work on multiple modules at once, please upgrade to Go 1.18 and use a go.work file. -See https://github.com/golang/tools/blob/master/gopls/doc/workspace.md for more information on using workspaces.` - } - // Add a diagnostic to each file in a nested module to mark it as - // "orphaned". Don't show a general diagnostic in the progress bar, - // because the user may still want to edit a file in a nested module. - var srcDiags []*source.Diagnostic - for modDir, files := range nestedModules { - msg := fmt.Sprintf("This file is in %s, which is a nested module in the %s module.\n%s", modDir, rootMod, multiModuleMsg) - srcDiags = append(srcDiags, s.applyCriticalErrorToFiles(ctx, msg, files)...) - } - if len(srcDiags) != 0 { - return fmt.Errorf("You have opened a nested module.\n%s", multiModuleMsg), srcDiags - } - } return nil, nil } diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go index c765cb00da1..de675a5a9c1 100644 --- a/gopls/internal/regtest/diagnostics/diagnostics_test.go +++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go @@ -1701,8 +1701,7 @@ func helloHelper() {} env.OpenFile("nested/hello/hello.go") env.AfterChange( Diagnostics(env.AtRegexp("nested/hello/hello.go", "helloHelper")), - Diagnostics(env.AtRegexp("nested/hello/hello.go", "package hello"), WithMessage("nested module")), - OutstandingWork(lsp.WorkspaceLoadFailure, "nested module"), + Diagnostics(env.AtRegexp("nested/hello/hello.go", "package (hello)"), WithMessage("not included in your workspace")), ) }) } diff --git a/gopls/internal/regtest/workspace/workspace_test.go b/gopls/internal/regtest/workspace/workspace_test.go index 88f9f2c2be3..02e3a8c9a59 100644 --- a/gopls/internal/regtest/workspace/workspace_test.go +++ b/gopls/internal/regtest/workspace/workspace_test.go @@ -1054,7 +1054,7 @@ func main() {} // package declaration. env.AfterChange( NoDiagnostics(ForFile("main.go")), - Diagnostics(AtPosition("b/main.go", 0, 0)), + Diagnostics(env.AtRegexp("b/main.go", "package (main)")), ) env.WriteWorkspaceFile("go.work", `go 1.16 @@ -1080,7 +1080,7 @@ use ( env.AfterChange( NoDiagnostics(ForFile("main.go")), - Diagnostics(AtPosition("b/main.go", 0, 0)), + Diagnostics(env.AtRegexp("b/main.go", "package (main)")), ) }) } From a12e1a6aeeffe3ab302b89fc9881dcd5fabde01f Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 23 May 2023 12:15:00 -0700 Subject: [PATCH 073/109] go/ssa/interp: implement min/max builtins Updates golang/go#59488. 
Change-Id: I68c90ddf0f9dea2c6506b9ab43beb522cbdf5fdd Reviewed-on: https://go-review.googlesource.com/c/tools/+/497516 Run-TryBot: Matthew Dempsky TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Tim King --- go/ssa/interp/external.go | 5 + go/ssa/interp/interp_go121_test.go | 12 +++ go/ssa/interp/ops.go | 91 ++++++++++++++++++ go/ssa/interp/testdata/minmax.go | 118 ++++++++++++++++++++++++ go/ssa/interp/testdata/src/math/math.go | 2 + 5 files changed, 228 insertions(+) create mode 100644 go/ssa/interp/interp_go121_test.go create mode 100644 go/ssa/interp/testdata/minmax.go diff --git a/go/ssa/interp/external.go b/go/ssa/interp/external.go index 7a79924e3fb..3e6fb01918a 100644 --- a/go/ssa/interp/external.go +++ b/go/ssa/interp/external.go @@ -70,6 +70,7 @@ func init() { "bytes.IndexByte": ext۰bytes۰IndexByte, "fmt.Sprint": ext۰fmt۰Sprint, "math.Abs": ext۰math۰Abs, + "math.Copysign": ext۰math۰Copysign, "math.Exp": ext۰math۰Exp, "math.Float32bits": ext۰math۰Float32bits, "math.Float32frombits": ext۰math۰Float32frombits, @@ -158,6 +159,10 @@ func ext۰math۰Abs(fr *frame, args []value) value { return math.Abs(args[0].(float64)) } +func ext۰math۰Copysign(fr *frame, args []value) value { + return math.Copysign(args[0].(float64), args[1].(float64)) +} + func ext۰math۰Exp(fr *frame, args []value) value { return math.Exp(args[0].(float64)) } diff --git a/go/ssa/interp/interp_go121_test.go b/go/ssa/interp/interp_go121_test.go new file mode 100644 index 00000000000..381dc4f636e --- /dev/null +++ b/go/ssa/interp/interp_go121_test.go @@ -0,0 +1,12 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 +// +build go1.21 + +package interp_test + +func init() { + testdataTests = append(testdataTests, "minmax.go") +} diff --git a/go/ssa/interp/ops.go b/go/ssa/interp/ops.go index a42d89b4f6b..dd309bf9ca9 100644 --- a/go/ssa/interp/ops.go +++ b/go/ssa/interp/ops.go @@ -1060,6 +1060,11 @@ func callBuiltin(caller *frame, callpos token.Pos, fn *ssa.Builtin, args []value panic(fmt.Sprintf("cap: illegal operand: %T", x)) } + case "min": + return foldLeft(min, args) + case "max": + return foldLeft(max, args) + case "real": switch c := args[0].(type) { case complex64: @@ -1426,3 +1431,89 @@ func checkInterface(i *interpreter, itype *types.Interface, x iface) string { } return "" // ok } + +func foldLeft(op func(value, value) value, args []value) value { + x := args[0] + for _, arg := range args[1:] { + x = op(x, arg) + } + return x +} + +func min(x, y value) value { + switch x := x.(type) { + case float32: + return fmin(x, y.(float32)) + case float64: + return fmin(x, y.(float64)) + } + + // return (y < x) ? y : x + if binop(token.LSS, nil, y, x).(bool) { + return y + } + return x +} + +func max(x, y value) value { + switch x := x.(type) { + case float32: + return fmax(x, y.(float32)) + case float64: + return fmax(x, y.(float64)) + } + + // return (y > x) ? 
y : x + if binop(token.GTR, nil, y, x).(bool) { + return y + } + return x +} + +// copied from $GOROOT/src/runtime/minmax.go + +type floaty interface{ ~float32 | ~float64 } + +func fmin[F floaty](x, y F) F { + if y != y || y < x { + return y + } + if x != x || x < y || x != 0 { + return x + } + // x and y are both ±0 + // if either is -0, return -0; else return +0 + return forbits(x, y) +} + +func fmax[F floaty](x, y F) F { + if y != y || y > x { + return y + } + if x != x || x > y || x != 0 { + return x + } + // x and y are both ±0 + // if both are -0, return -0; else return +0 + return fandbits(x, y) +} + +func forbits[F floaty](x, y F) F { + switch unsafe.Sizeof(x) { + case 4: + *(*uint32)(unsafe.Pointer(&x)) |= *(*uint32)(unsafe.Pointer(&y)) + case 8: + *(*uint64)(unsafe.Pointer(&x)) |= *(*uint64)(unsafe.Pointer(&y)) + } + return x +} + +func fandbits[F floaty](x, y F) F { + switch unsafe.Sizeof(x) { + case 4: + *(*uint32)(unsafe.Pointer(&x)) &= *(*uint32)(unsafe.Pointer(&y)) + case 8: + *(*uint64)(unsafe.Pointer(&x)) &= *(*uint64)(unsafe.Pointer(&y)) + } + return x +} diff --git a/go/ssa/interp/testdata/minmax.go b/go/ssa/interp/testdata/minmax.go new file mode 100644 index 00000000000..778dcefff63 --- /dev/null +++ b/go/ssa/interp/testdata/minmax.go @@ -0,0 +1,118 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "math" +) + +func main() { + TestMinFloat() + TestMaxFloat() + TestMinMaxInt() + TestMinMaxUint8() + TestMinMaxString() +} + +func errorf(format string, args ...any) { panic(fmt.Sprintf(format, args...)) } +func fatalf(format string, args ...any) { panic(fmt.Sprintf(format, args...)) } + +// derived from $GOROOT/src/runtime/minmax_test.go + +var ( + zero = math.Copysign(0, +1) + negZero = math.Copysign(0, -1) + inf = math.Inf(+1) + negInf = math.Inf(-1) + nan = math.NaN() +) + +var tests = []struct{ min, max float64 }{ + {1, 2}, + {-2, 1}, + {negZero, zero}, + {zero, inf}, + {negInf, zero}, + {negInf, inf}, + {1, inf}, + {negInf, 1}, +} + +var all = []float64{1, 2, -1, -2, zero, negZero, inf, negInf, nan} + +func eq(x, y float64) bool { + return x == y && math.Signbit(x) == math.Signbit(y) +} + +func TestMinFloat() { + for _, tt := range tests { + if z := min(tt.min, tt.max); !eq(z, tt.min) { + errorf("min(%v, %v) = %v, want %v", tt.min, tt.max, z, tt.min) + } + if z := min(tt.max, tt.min); !eq(z, tt.min) { + errorf("min(%v, %v) = %v, want %v", tt.max, tt.min, z, tt.min) + } + } + for _, x := range all { + if z := min(nan, x); !math.IsNaN(z) { + errorf("min(%v, %v) = %v, want %v", nan, x, z, nan) + } + if z := min(x, nan); !math.IsNaN(z) { + errorf("min(%v, %v) = %v, want %v", nan, x, z, nan) + } + } +} + +func TestMaxFloat() { + for _, tt := range tests { + if z := max(tt.min, tt.max); !eq(z, tt.max) { + errorf("max(%v, %v) = %v, want %v", tt.min, tt.max, z, tt.max) + } + if z := max(tt.max, tt.min); !eq(z, tt.max) { + errorf("max(%v, %v) = %v, want %v", tt.max, tt.min, z, tt.max) + } + } + for _, x := range all { + if z := max(nan, x); !math.IsNaN(z) { + errorf("min(%v, %v) = %v, want %v", nan, x, z, nan) + } + if z := max(x, nan); !math.IsNaN(z) { + errorf("min(%v, %v) = %v, want %v", nan, x, z, nan) + } + } +} + +// testMinMax tests that min/max behave correctly on every pair of +// values in vals. +// +// vals should be a sequence of values in strictly ascending order. 
+func testMinMax[T int | uint8 | string](vals ...T) { + for i, x := range vals { + for _, y := range vals[i+1:] { + if !(x < y) { + fatalf("values out of order: !(%v < %v)", x, y) + } + + if z := min(x, y); z != x { + errorf("min(%v, %v) = %v, want %v", x, y, z, x) + } + if z := min(y, x); z != x { + errorf("min(%v, %v) = %v, want %v", y, x, z, x) + } + + if z := max(x, y); z != y { + errorf("max(%v, %v) = %v, want %v", x, y, z, y) + } + if z := max(y, x); z != y { + errorf("max(%v, %v) = %v, want %v", y, x, z, y) + } + } + } +} + +func TestMinMaxInt() { testMinMax[int](-7, 0, 9) } +func TestMinMaxUint8() { testMinMax[uint8](0, 1, 2, 4, 7) } +func TestMinMaxString() { testMinMax[string]("a", "b", "c") } diff --git a/go/ssa/interp/testdata/src/math/math.go b/go/ssa/interp/testdata/src/math/math.go index 64fe60c9921..0fb38706407 100644 --- a/go/ssa/interp/testdata/src/math/math.go +++ b/go/ssa/interp/testdata/src/math/math.go @@ -1,5 +1,7 @@ package math +func Copysign(float64, float64) float64 + func NaN() float64 func Inf(int) float64 From 827f5aa2c3986c6a2fc4af5c464850dd93399d0c Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 23 May 2023 14:07:00 -0400 Subject: [PATCH 074/109] gopls/internal/lsp/source: test references bug on struct{p.T} This change adds a regression test for bug in gopls' references operation applied to the T identifier in an embedded struct field such as struct{p.T): instead of reporting references to T, it reports references to package name p. This is a consequence of go/types bug golang/go#60372, which sets the position of the struct field types.Var to that of the ast.Field syntax (p) not the type name (T). The bug was fixed in go1.21. Updates golang/go#60372 Fixes golang/go#60369 Change-Id: Ibabe885ea689b30d966dbf7e51f8c25e44a6ce1c Reviewed-on: https://go-review.googlesource.com/c/tools/+/497495 gopls-CI: kokoro Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Robert Findley --- gopls/internal/lsp/source/references.go | 8 ++++++ .../marker/testdata/references/issue60369.txt | 27 +++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 gopls/internal/regtest/marker/testdata/references/issue60369.txt diff --git a/gopls/internal/lsp/source/references.go b/gopls/internal/lsp/source/references.go index 939f01a7f07..166c59d5f84 100644 --- a/gopls/internal/lsp/source/references.go +++ b/gopls/internal/lsp/source/references.go @@ -224,6 +224,7 @@ func ordinaryReferences(ctx context.Context, snapshot Snapshot, uri span.URI, pp } // Find the selected object (declaration or reference). + // For struct{T}, we choose the field (Def) over the type (Use). pos, err := pgf.PositionPos(pp) if err != nil { return nil, err @@ -649,6 +650,13 @@ func objectsAt(info *types.Info, file *ast.File, pos token.Pos) (map[types.Objec targets[obj] = leaf } } else { + // Note: prior to go1.21, go/types issue #60372 causes the position + // a field Var T created for struct{*p.T} to be recorded at the + // start of the field type ("*") not the location of the T. + // This affects references and other gopls operations (issue #60369). + // TODO(adonovan): delete this comment when we drop support for go1.20. + + // For struct{T}, we prefer the defined field Var over the used TypeName. 
obj := info.ObjectOf(leaf) if obj == nil { return nil, nil, fmt.Errorf("%w for %q", errNoObjectFound, leaf.Name) diff --git a/gopls/internal/regtest/marker/testdata/references/issue60369.txt b/gopls/internal/regtest/marker/testdata/references/issue60369.txt new file mode 100644 index 00000000000..c363f35d78e --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/references/issue60369.txt @@ -0,0 +1,27 @@ +Regression test for 'references' bug golang/go#60369: a references +query on the embedded type name T in struct{p.T} instead reports all +references to the package name p. + +The bug was fixed in release go1.21 of go/types. + +-- flags -- +-min_go=go1.21 + +-- go.mod -- +module example.com +go 1.12 + +-- a/a.go -- +package a + +type A struct{} +const C = 0 + +-- b/b.go -- +package b + +import a "example.com/a" //@loc(adef, "a") +type s struct { a.A } //@loc(Aref1, "A"), loc(aref1, "a"), refs(Aref1, Aref1, Aref3), refs(aref1, adef, aref1, aref2, aref3) +var _ a.A //@loc(aref2, re" (a)"), loc(Aref2, "A") +var _ = s{}.A //@loc(Aref3, "A") +const c = a.C //@loc(aref3, "a") From ed90c6d201ea1d0f2e8d12d4ca13635eef866698 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 25 May 2023 10:46:40 -0400 Subject: [PATCH 075/109] internal/diff: unexport various identifiers They were exported only because of unnecessary coupling with another package, solved by copying. Change-Id: I5f08ad9091b8fce10c2bac6383e020a3c45426f6 Reviewed-on: https://go-review.googlesource.com/c/tools/+/498257 Reviewed-by: Robert Findley Run-TryBot: Alan Donovan gopls-CI: kokoro TryBot-Result: Gopher Robot --- internal/diff/myers/diff.go | 34 ++++++++-- internal/diff/unified.go | 121 ++++++++++++++++++------------------ 2 files changed, 88 insertions(+), 67 deletions(-) diff --git a/internal/diff/myers/diff.go b/internal/diff/myers/diff.go index 7c2d4356b42..c0f6cce504b 100644 --- a/internal/diff/myers/diff.go +++ b/internal/diff/myers/diff.go @@ -32,10 +32,10 @@ func ComputeEdits(before, after string) []diff.Edit { for _, op := range ops { start, end := lineOffsets[op.I1], lineOffsets[op.I2] switch op.Kind { - case diff.Delete: + case opDelete: // Delete: before[I1:I2] is deleted. edits = append(edits, diff.Edit{Start: start, End: end}) - case diff.Insert: + case opInsert: // Insert: after[J1:J2] is inserted at before[I1:I1]. if content := strings.Join(op.Content, ""); content != "" { edits = append(edits, diff.Edit{Start: start, End: end, New: content}) @@ -45,8 +45,30 @@ func ComputeEdits(before, after string) []diff.Edit { return edits } +// opKind is used to denote the type of operation a line represents. 
+type opKind int + +const ( + opDelete opKind = iota // line deleted from input (-) + opInsert // line inserted into output (+) + opEqual // line present in input and output +) + +func (kind opKind) String() string { + switch kind { + case opDelete: + return "delete" + case opInsert: + return "insert" + case opEqual: + return "equal" + default: + panic("unknown opKind") + } +} + type operation struct { - Kind diff.OpKind + Kind opKind Content []string // content from b I1, I2 int // indices of the line in a J1 int // indices of the line in b, J2 implied by len(Content) @@ -72,7 +94,7 @@ func operations(a, b []string) []*operation { return } op.I2 = i2 - if op.Kind == diff.Insert { + if op.Kind == opInsert { op.Content = b[op.J1:j2] } solution[i] = op @@ -88,7 +110,7 @@ func operations(a, b []string) []*operation { for snake[0]-snake[1] > x-y { if op == nil { op = &operation{ - Kind: diff.Delete, + Kind: opDelete, I1: x, J1: y, } @@ -104,7 +126,7 @@ func operations(a, b []string) []*operation { for snake[0]-snake[1] < x-y { if op == nil { op = &operation{ - Kind: diff.Insert, + Kind: opInsert, I1: x, J1: y, } diff --git a/internal/diff/unified.go b/internal/diff/unified.go index ed2c22e8b9e..3522e1e5b18 100644 --- a/internal/diff/unified.go +++ b/internal/diff/unified.go @@ -36,58 +36,57 @@ func ToUnified(oldLabel, newLabel, content string, edits []Edit) (string, error) // unified represents a set of edits as a unified diff. type unified struct { - // From is the name of the original file. - From string - // To is the name of the modified file. - To string - // Hunks is the set of edit hunks needed to transform the file content. - Hunks []*hunk + // from is the name of the original file. + from string + // to is the name of the modified file. + to string + // hunks is the set of edit hunks needed to transform the file content. + hunks []*hunk } // Hunk represents a contiguous set of line edits to apply. type hunk struct { // The line in the original source where the hunk starts. - FromLine int + fromLine int // The line in the original source where the hunk finishes. - ToLine int + toLine int // The set of line based edits to apply. - Lines []line + lines []line } // Line represents a single line operation to apply as part of a Hunk. type line struct { - // Kind is the type of line this represents, deletion, insertion or copy. - Kind OpKind - // Content is the content of this line. + // kind is the type of line this represents, deletion, insertion or copy. + kind opKind + // content is the content of this line. // For deletion it is the line being removed, for all others it is the line // to put in the output. - Content string + content string } -// OpKind is used to denote the type of operation a line represents. -// TODO(adonovan): hide this once the myers package no longer references it. -type OpKind int +// opKind is used to denote the type of operation a line represents. +type opKind int const ( - // Delete is the operation kind for a line that is present in the input + // opDelete is the operation kind for a line that is present in the input // but not in the output. - Delete OpKind = iota - // Insert is the operation kind for a line that is new in the output. - Insert - // Equal is the operation kind for a line that is the same in the input and + opDelete opKind = iota + // opInsert is the operation kind for a line that is new in the output. 
+ opInsert + // opEqual is the operation kind for a line that is the same in the input and // output, often used to provide context around edited lines. - Equal + opEqual ) // String returns a human readable representation of an OpKind. It is not // intended for machine processing. -func (k OpKind) String() string { +func (k opKind) String() string { switch k { - case Delete: + case opDelete: return "delete" - case Insert: + case opInsert: return "insert" - case Equal: + case opEqual: return "equal" default: panic("unknown operation kind") @@ -103,8 +102,8 @@ const ( // a unified diff that represents those edits. func toUnified(fromName, toName string, content string, edits []Edit) (unified, error) { u := unified{ - From: fromName, - To: toName, + from: fromName, + to: toName, } if len(edits) == 0 { return u, nil @@ -138,21 +137,21 @@ func toUnified(fromName, toName string, content string, edits []Edit) (unified, if h != nil { // add the edge to the previous hunk addEqualLines(h, lines, last, last+edge) - u.Hunks = append(u.Hunks, h) + u.hunks = append(u.hunks, h) } toLine += start - last h = &hunk{ - FromLine: start + 1, - ToLine: toLine + 1, + fromLine: start + 1, + toLine: toLine + 1, } // add the edge to the new hunk delta := addEqualLines(h, lines, start-edge, start) - h.FromLine -= delta - h.ToLine -= delta + h.fromLine -= delta + h.toLine -= delta } last = start for i := start; i < end; i++ { - h.Lines = append(h.Lines, line{Kind: Delete, Content: lines[i]}) + h.lines = append(h.lines, line{kind: opDelete, content: lines[i]}) last++ } if edit.New != "" { @@ -163,18 +162,18 @@ func toUnified(fromName, toName string, content string, edits []Edit) (unified, // that is easiest to fix by postprocessing. // e.g. issue #59232: ("aaa\nccc\n", "aaa\nbbb\nccc") // -> [Delete "aaa\n", Insert "aaa\n", Insert "bbb\n", ...]. - if i == 0 && last > start && h.Lines[len(h.Lines)-1].Content == content { - h.Lines[len(h.Lines)-1].Kind = Equal + if i == 0 && last > start && h.lines[len(h.lines)-1].content == content { + h.lines[len(h.lines)-1].kind = opEqual continue } - h.Lines = append(h.Lines, line{Kind: Insert, Content: content}) + h.lines = append(h.lines, line{kind: opInsert, content: content}) } } } if h != nil { // add the edge to the final hunk addEqualLines(h, lines, last, last+edge) - u.Hunks = append(u.Hunks, h) + u.hunks = append(u.hunks, h) } return u, nil } @@ -196,7 +195,7 @@ func addEqualLines(h *hunk, lines []string, start, end int) int { if i >= len(lines) { return delta } - h.Lines = append(h.Lines, line{Kind: Equal, Content: lines[i]}) + h.lines = append(h.lines, line{kind: opEqual, content: lines[i]}) delta++ } return delta @@ -205,19 +204,19 @@ func addEqualLines(h *hunk, lines []string, start, end int) int { // String converts a unified diff to the standard textual form for that diff. // The output of this function can be passed to tools like patch. 
func (u unified) String() string { - if len(u.Hunks) == 0 { + if len(u.hunks) == 0 { return "" } b := new(strings.Builder) - fmt.Fprintf(b, "--- %s\n", u.From) - fmt.Fprintf(b, "+++ %s\n", u.To) - for _, hunk := range u.Hunks { + fmt.Fprintf(b, "--- %s\n", u.from) + fmt.Fprintf(b, "+++ %s\n", u.to) + for _, hunk := range u.hunks { fromCount, toCount := 0, 0 - for _, l := range hunk.Lines { - switch l.Kind { - case Delete: + for _, l := range hunk.lines { + switch l.kind { + case opDelete: fromCount++ - case Insert: + case opInsert: toCount++ default: fromCount++ @@ -226,32 +225,32 @@ func (u unified) String() string { } fmt.Fprint(b, "@@") if fromCount > 1 { - fmt.Fprintf(b, " -%d,%d", hunk.FromLine, fromCount) - } else if hunk.FromLine == 1 && fromCount == 0 { + fmt.Fprintf(b, " -%d,%d", hunk.fromLine, fromCount) + } else if hunk.fromLine == 1 && fromCount == 0 { // Match odd GNU diff -u behavior adding to empty file. fmt.Fprintf(b, " -0,0") } else { - fmt.Fprintf(b, " -%d", hunk.FromLine) + fmt.Fprintf(b, " -%d", hunk.fromLine) } if toCount > 1 { - fmt.Fprintf(b, " +%d,%d", hunk.ToLine, toCount) - } else if hunk.ToLine == 1 && toCount == 0 { + fmt.Fprintf(b, " +%d,%d", hunk.toLine, toCount) + } else if hunk.toLine == 1 && toCount == 0 { // Match odd GNU diff -u behavior adding to empty file. fmt.Fprintf(b, " +0,0") } else { - fmt.Fprintf(b, " +%d", hunk.ToLine) + fmt.Fprintf(b, " +%d", hunk.toLine) } fmt.Fprint(b, " @@\n") - for _, l := range hunk.Lines { - switch l.Kind { - case Delete: - fmt.Fprintf(b, "-%s", l.Content) - case Insert: - fmt.Fprintf(b, "+%s", l.Content) + for _, l := range hunk.lines { + switch l.kind { + case opDelete: + fmt.Fprintf(b, "-%s", l.content) + case opInsert: + fmt.Fprintf(b, "+%s", l.content) default: - fmt.Fprintf(b, " %s", l.Content) + fmt.Fprintf(b, " %s", l.content) } - if !strings.HasSuffix(l.Content, "\n") { + if !strings.HasSuffix(l.content, "\n") { fmt.Fprintf(b, "\n\\ No newline at end of file\n") } } From 5f74ec7da5f5b84a3d05750000a35e486c0edece Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 26 May 2023 11:25:42 -0400 Subject: [PATCH 076/109] internal/lsp/debug: add links to profiles and GC This change adds to the memstats page a button to run the GC and reload the stats. It also adds the missing navigation link to the menu of pprof profile types. Change-Id: Icf91111ce6e253cd2040725a0b86178209ebec1f Reviewed-on: https://go-review.googlesource.com/c/tools/+/498558 Run-TryBot: Alan Donovan gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Robert Findley --- gopls/internal/lsp/debug/serve.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/gopls/internal/lsp/debug/serve.go b/gopls/internal/lsp/debug/serve.go index f36a2385739..bc60fba5fbe 100644 --- a/gopls/internal/lsp/debug/serve.go +++ b/gopls/internal/lsp/debug/serve.go @@ -460,11 +460,11 @@ func (i *Instance) Serve(ctx context.Context, addr string) (string, error) { mux.HandleFunc("/memory", render(MemoryTmpl, getMemory)) // Internal debugging helpers. 
- mux.HandleFunc("/_dogc", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/gc", func(w http.ResponseWriter, r *http.Request) { runtime.GC() runtime.GC() runtime.GC() - http.Error(w, "OK", 200) + http.Redirect(w, r, "/memory", http.StatusTemporaryRedirect) }) mux.HandleFunc("/_makeabug", func(w http.ResponseWriter, r *http.Request) { bug.Report("bug here") @@ -647,6 +647,7 @@ ul.spans { Main Info Memory +Profiling Metrics RPC Trace @@ -716,9 +717,10 @@ var InfoTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` `)) var MemoryTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(` -{{define "title"}}GoPls memory usage{{end}} +{{define "title"}}Gopls memory usage{{end}} {{define "head"}}{{end}} {{define "body"}} +

Stats

From f3faea1982c3b87f69b407fc0add0e10c591b9dc Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Fri, 26 May 2023 10:23:39 -0400 Subject: [PATCH 077/109] go/packages: pass -pgo=off on go1.21 and later PGO variants can be costly to compute, and can result in unnecessary packages for the caller. They are also unlikely to be useful. Disable these variants by default, setting -pgo=off on go1.21 and later. If someone wants to see PGO variants we can have a NeedPGO flag, but that should be a separate proposal. Fixes golang/go#60456 Change-Id: Ifc706fe7503f841cbe4fa4326b08717a70b4368b Reviewed-on: https://go-review.googlesource.com/c/tools/+/498557 TryBot-Result: Gopher Robot Reviewed-by: Bryan Mills Run-TryBot: Robert Findley gopls-CI: kokoro Reviewed-by: Alan Donovan --- go/packages/golist.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/go/packages/golist.go b/go/packages/golist.go index 6bb7168d2e3..aa2ca3bd684 100644 --- a/go/packages/golist.go +++ b/go/packages/golist.go @@ -891,6 +891,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string { // probably because you'd just get the TestMain. fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") fullargs = append(fullargs, words...) From 5974258e689a4f8a93448a0d181737afa4506e3f Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 26 May 2023 12:59:48 -0400 Subject: [PATCH 078/109] gopls/internal/lsp: clear vuln diagnostics on config changes Clear vuln diagnostics before diagnosing views, during handling of config changes. This should mostly avoid the symptoms of golang/go#60465, but as noted in the code is not a proper fix. Unfortunately there is no way to write a test for this behavior that is not flaky, because the operation itself is flaky. I have scheduled a proper fix for gopls@v0.12.1. Fixes golang/go#60465 Change-Id: If41f5420c24dfa15a7d83e89988488619a2dd857 Reviewed-on: https://go-review.googlesource.com/c/tools/+/498560 gopls-CI: kokoro Run-TryBot: Robert Findley Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot --- gopls/internal/lsp/cache/snapshot.go | 4 ++++ gopls/internal/lsp/workspace.go | 22 +++++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index de9524bf0ae..7e0a9ba196b 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -2156,6 +2156,10 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC if !change.exists { result.files.Delete(uri) } else { + // TODO(golang/go#57558): the line below is strictly necessary to ensure + // that snapshots have each overlay, but it is problematic that we must + // set any content in snapshot.clone: if the file has changed, let it be + // re-read. 
result.files.Set(uri, change.fileHandle) } diff --git a/gopls/internal/lsp/workspace.go b/gopls/internal/lsp/workspace.go index 818135e94a2..e5f813e730c 100644 --- a/gopls/internal/lsp/workspace.go +++ b/gopls/internal/lsp/workspace.go @@ -61,10 +61,30 @@ func (s *Server) didChangeConfiguration(ctx context.Context, _ *protocol.DidChan if err := s.fetchConfig(ctx, view.Name(), view.Folder(), options); err != nil { return err } - view, err := s.session.SetViewOptions(ctx, view, options) + _, err := s.session.SetViewOptions(ctx, view, options) if err != nil { return err } + } + + // Now that all views have been updated: reset vulncheck diagnostics, rerun + // diagnostics, and hope for the best... + // + // TODO(golang/go#60465): this not a reliable way to ensure the correctness + // of the resulting diagnostics below. A snapshot could still be in the + // process of diagnosing the workspace, and not observe the configuration + // changes above. + // + // The real fix is golang/go#42814: we should create a new snapshot on any + // change that could affect the derived results in that snapshot. However, we + // are currently (2023-05-26) on the verge of a release, and the proper fix + // is too risky a change. Since in the common case a configuration change is + // only likely to occur during a period of quiescence on the server, it is + // likely that the clearing below will have the desired effect. + s.clearDiagnosticSource(modVulncheckSource) + + for _, view := range s.session.Views() { + view := view go func() { snapshot, release, err := view.Snapshot() if err != nil { From 933c7ccb15451459ca4fe53c041a4108f4859d91 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 25 May 2023 16:26:39 -0400 Subject: [PATCH 079/109] internal/lsp/source: use exact match in import highlighting Previously, hovering on a package name such as http.XYZ would highlight its import path ("net/http"), but also any other one that contained it as a substring, such as "net/http/httptest". (This behavior was there from the outset in CL 215258, but wasn't remarked upon during the review.) This change uses exact matching based on type-checker objects, not strings,, adds a test of same, and clarifies the logic. Fixes golang/go#60435 Change-Id: I9cc07dbcdaf54707d17be2a162bfcb0a22aa440a Reviewed-on: https://go-review.googlesource.com/c/tools/+/498268 Auto-Submit: Alan Donovan Reviewed-by: Robert Findley Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot --- gopls/internal/lsp/source/highlight.go | 114 +++++++++--------- gopls/internal/lsp/source/util.go | 8 +- gopls/internal/lsp/source/xrefs/xrefs.go | 11 +- .../lsp/testdata/highlights/issue60435.go | 14 +++ .../internal/lsp/testdata/summary.txt.golden | 2 +- .../lsp/testdata/summary_go1.18.txt.golden | 2 +- .../lsp/testdata/summary_go1.21.txt.golden | 2 +- 7 files changed, 76 insertions(+), 77 deletions(-) create mode 100644 gopls/internal/lsp/testdata/highlights/issue60435.go diff --git a/gopls/internal/lsp/source/highlight.go b/gopls/internal/lsp/source/highlight.go index ad13d253df9..adfc659e20c 100644 --- a/gopls/internal/lsp/source/highlight.go +++ b/gopls/internal/lsp/source/highlight.go @@ -10,7 +10,6 @@ import ( "go/ast" "go/token" "go/types" - "strings" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/gopls/internal/lsp/protocol" @@ -67,10 +66,27 @@ func highlightPath(path []ast.Node, file *ast.File, info *types.Info) (map[posRa result := make(map[posRange]struct{}) switch node := path[0].(type) { case *ast.BasicLit: + // Import path string literal? 
if len(path) > 1 { - if _, ok := path[1].(*ast.ImportSpec); ok { - err := highlightImportUses(path, info, result) - return result, err + if imp, ok := path[1].(*ast.ImportSpec); ok { + highlight := func(n ast.Node) { + result[posRange{start: n.Pos(), end: n.End()}] = struct{}{} + } + + // Highlight the import itself... + highlight(imp) + + // ...and all references to it in the file. + if pkgname, ok := ImportedPkgName(info, imp); ok { + ast.Inspect(file, func(n ast.Node) bool { + if id, ok := n.(*ast.Ident); ok && + info.Uses[id] == pkgname { + highlight(id) + } + return true + }) + } + return result, nil } } highlightFuncControlFlow(path, result) @@ -419,66 +435,46 @@ Outer: }) } -func highlightImportUses(path []ast.Node, info *types.Info, result map[posRange]struct{}) error { - basicLit, ok := path[0].(*ast.BasicLit) - if !ok { - return fmt.Errorf("highlightImportUses called with an ast.Node of type %T", basicLit) - } - ast.Inspect(path[len(path)-1], func(node ast.Node) bool { - if imp, ok := node.(*ast.ImportSpec); ok && imp.Path == basicLit { - result[posRange{start: node.Pos(), end: node.End()}] = struct{}{} - return false - } - n, ok := node.(*ast.Ident) - if !ok { - return true - } - obj, ok := info.ObjectOf(n).(*types.PkgName) - if !ok { - return true - } - if !strings.Contains(basicLit.Value, obj.Name()) { - return true - } +func highlightIdentifier(id *ast.Ident, file *ast.File, info *types.Info, result map[posRange]struct{}) { + highlight := func(n ast.Node) { result[posRange{start: n.Pos(), end: n.End()}] = struct{}{} - return false - }) - return nil -} + } -func highlightIdentifier(id *ast.Ident, file *ast.File, info *types.Info, result map[posRange]struct{}) { - // TODO(rfindley): idObj may be nil. Note that returning early in this case - // causes tests to fail (because the nObj == idObj check below was succeeded - // for nil == nil!) - // - // Revisit this. If ObjectOf is nil, there are type errors, and it seems - // reasonable for identifier highlighting not to work. - idObj := info.ObjectOf(id) - pkgObj, isImported := idObj.(*types.PkgName) - ast.Inspect(file, func(node ast.Node) bool { - if imp, ok := node.(*ast.ImportSpec); ok && isImported { - highlightImport(pkgObj, imp, result) - } - n, ok := node.(*ast.Ident) - if !ok { - return true - } - if n.Name != id.Name { - return false - } - if nObj := info.ObjectOf(n); nObj == idObj { - result[posRange{start: n.Pos(), end: n.End()}] = struct{}{} + // obj may be nil if the Ident is undefined. + // In this case, the behavior expected by tests is + // to match other undefined Idents of the same name. + obj := info.ObjectOf(id) + + ast.Inspect(file, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.Ident: + if n.Name == id.Name && info.ObjectOf(n) == obj { + highlight(n) + } + + case *ast.ImportSpec: + pkgname, ok := ImportedPkgName(info, n) + if ok && pkgname == obj { + if n.Name != nil { + highlight(n.Name) + } else { + highlight(n) + } + } } - return false + return true }) } -func highlightImport(obj *types.PkgName, imp *ast.ImportSpec, result map[posRange]struct{}) { - if imp.Name != nil || imp.Path == nil { - return - } - if !strings.Contains(imp.Path.Value, obj.Name()) { - return +// ImportedPkgName returns the PkgName object declared by an ImportSpec. +// TODO(adonovan): make this a method of types.Info. 
+func ImportedPkgName(info *types.Info, imp *ast.ImportSpec) (*types.PkgName, bool) { + var obj types.Object + if imp.Name != nil { + obj = info.Defs[imp.Name] + } else { + obj = info.Implicits[imp] } - result[posRange{start: imp.Path.Pos(), end: imp.Path.End()}] = struct{}{} + pkgname, ok := obj.(*types.PkgName) + return pkgname, ok } diff --git a/gopls/internal/lsp/source/util.go b/gopls/internal/lsp/source/util.go index cbb17809496..d0ecd50ac6b 100644 --- a/gopls/internal/lsp/source/util.go +++ b/gopls/internal/lsp/source/util.go @@ -255,13 +255,7 @@ func Qualifier(f *ast.File, pkg *types.Package, info *types.Info) types.Qualifie // Construct mapping of import paths to their defined or implicit names. imports := make(map[*types.Package]string) for _, imp := range f.Imports { - var obj types.Object - if imp.Name != nil { - obj = info.Defs[imp.Name] - } else { - obj = info.Implicits[imp] - } - if pkgname, ok := obj.(*types.PkgName); ok { + if pkgname, ok := ImportedPkgName(info, imp); ok { imports[pkgname.Imported()] = pkgname.Name() } } diff --git a/gopls/internal/lsp/source/xrefs/xrefs.go b/gopls/internal/lsp/source/xrefs/xrefs.go index 6231f888430..36463c26972 100644 --- a/gopls/internal/lsp/source/xrefs/xrefs.go +++ b/gopls/internal/lsp/source/xrefs/xrefs.go @@ -87,16 +87,11 @@ func Index(files []*source.ParsedGoFile, pkg *types.Package, info *types.Info) [ case *ast.ImportSpec: // Report a reference from each import path // string to the imported package. - var obj types.Object - if n.Name != nil { - obj = info.Defs[n.Name] - } else { - obj = info.Implicits[n] - } - if obj == nil { + pkgname, ok := source.ImportedPkgName(info, n) + if !ok { return true // missing import } - objects := getObjects(obj.(*types.PkgName).Imported()) + objects := getObjects(pkgname.Imported()) gobObj, ok := objects[nil] if !ok { gobObj = &gobObject{Path: ""} diff --git a/gopls/internal/lsp/testdata/highlights/issue60435.go b/gopls/internal/lsp/testdata/highlights/issue60435.go new file mode 100644 index 00000000000..de0070e5832 --- /dev/null +++ b/gopls/internal/lsp/testdata/highlights/issue60435.go @@ -0,0 +1,14 @@ +package highlights + +import ( + "net/http" //@mark(httpImp, `"net/http"`) + "net/http/httptest" //@mark(httptestImp, `"net/http/httptest"`) +) + +// This is a regression test for issue 60435: +// Highlighting "net/http" shouldn't have any effect +// on an import path that contains it as a substring, +// such as httptest. 
+ +var _ = httptest.NewRequest +var _ = http.NewRequest //@mark(here, "http"), highlight(here, here, httpImp) diff --git a/gopls/internal/lsp/testdata/summary.txt.golden b/gopls/internal/lsp/testdata/summary.txt.golden index c572e268f7f..e6cee0c9b3c 100644 --- a/gopls/internal/lsp/testdata/summary.txt.golden +++ b/gopls/internal/lsp/testdata/summary.txt.golden @@ -15,7 +15,7 @@ SuggestedFixCount = 73 MethodExtractionCount = 6 DefinitionsCount = 46 TypeDefinitionsCount = 18 -HighlightsCount = 69 +HighlightsCount = 70 InlayHintsCount = 4 RenamesCount = 41 PrepareRenamesCount = 7 diff --git a/gopls/internal/lsp/testdata/summary_go1.18.txt.golden b/gopls/internal/lsp/testdata/summary_go1.18.txt.golden index da3b553834c..4d847b42511 100644 --- a/gopls/internal/lsp/testdata/summary_go1.18.txt.golden +++ b/gopls/internal/lsp/testdata/summary_go1.18.txt.golden @@ -15,7 +15,7 @@ SuggestedFixCount = 79 MethodExtractionCount = 6 DefinitionsCount = 46 TypeDefinitionsCount = 18 -HighlightsCount = 69 +HighlightsCount = 70 InlayHintsCount = 5 RenamesCount = 48 PrepareRenamesCount = 7 diff --git a/gopls/internal/lsp/testdata/summary_go1.21.txt.golden b/gopls/internal/lsp/testdata/summary_go1.21.txt.golden index 52fba365236..9c1b504ab7c 100644 --- a/gopls/internal/lsp/testdata/summary_go1.21.txt.golden +++ b/gopls/internal/lsp/testdata/summary_go1.21.txt.golden @@ -15,7 +15,7 @@ SuggestedFixCount = 79 MethodExtractionCount = 6 DefinitionsCount = 46 TypeDefinitionsCount = 18 -HighlightsCount = 69 +HighlightsCount = 70 InlayHintsCount = 5 RenamesCount = 48 PrepareRenamesCount = 7 From 33c741de78259c8c12df7103f7a6caf8dd9c6181 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Fri, 26 May 2023 11:29:44 +0700 Subject: [PATCH 080/109] gopls/internal/lsp: add min/max builtin For golang/go#59488 Change-Id: I43d9a5b644a9c3ce647a11f9e2b647093b070c9f Reviewed-on: https://go-review.googlesource.com/c/tools/+/498515 Reviewed-by: Matthew Dempsky Run-TryBot: Cuong Manh Le gopls-CI: kokoro Reviewed-by: Robert Findley TryBot-Result: Gopher Robot Auto-Submit: Cuong Manh Le --- gopls/internal/lsp/completion_test.go | 17 +++++++++++++++-- .../lsp/testdata/builtins/builtin_go121.go | 2 +- .../internal/lsp/testdata/builtins/builtins.go | 2 ++ gopls/internal/lsp/tests/util_go121.go | 2 ++ 4 files changed, 20 insertions(+), 3 deletions(-) diff --git a/gopls/internal/lsp/completion_test.go b/gopls/internal/lsp/completion_test.go index 1fc7304fc43..bef5e11e340 100644 --- a/gopls/internal/lsp/completion_test.go +++ b/gopls/internal/lsp/completion_test.go @@ -25,8 +25,8 @@ func (r *runner) Completion(t *testing.T, src span.Span, test tests.Completion, opts.LiteralCompletions = strings.Contains(string(src.URI()), "literal") opts.ExperimentalPostfixCompletions = strings.Contains(string(src.URI()), "postfix") }) - got = tests.FilterBuiltins(src, got) - want := expected(t, test, items) + got = filterSkipCompletionItems(tests.FilterBuiltins(src, got)) + want := filterSkipCompletionItems(expected(t, test, items)) if diff := tests.DiffCompletionItems(want, got); diff != "" { t.Errorf("mismatching completion items (-want +got):\n%s", diff) } @@ -175,3 +175,16 @@ func (r *runner) callCompletion(t *testing.T, src span.Span, options func(*sourc } return list.Items } + +func filterSkipCompletionItems(items []protocol.CompletionItem) []protocol.CompletionItem { + n := 0 + for _, item := range items { + // TODO(cuonglm): remove once https://go-review.googlesource.com/c/go/+/498495 land. 
+ if item.Label == "max" || item.Label == "min" { + continue + } + items[n] = item + n++ + } + return items[:n] +} diff --git a/gopls/internal/lsp/testdata/builtins/builtin_go121.go b/gopls/internal/lsp/testdata/builtins/builtin_go121.go index cb8e8fae3ab..a52d168636e 100644 --- a/gopls/internal/lsp/testdata/builtins/builtin_go121.go +++ b/gopls/internal/lsp/testdata/builtins/builtin_go121.go @@ -4,5 +4,5 @@ package builtins func _() { - //@complete("", any, append, bool, byte, cap, clear, close, comparable, complex, complex128, complex64, copy, delete, error, _false, float32, float64, imag, int, int16, int32, int64, int8, len, make, new, panic, print, println, real, recover, rune, string, _true, uint, uint16, uint32, uint64, uint8, uintptr, _nil) + //@complete("", any, append, bool, byte, cap, clear, close, comparable, complex, complex128, complex64, copy, delete, error, _false, float32, float64, imag, int, int16, int32, int64, int8, len, make, max, min, new, panic, print, println, real, recover, rune, string, _true, uint, uint16, uint32, uint64, uint8, uintptr, _nil) } diff --git a/gopls/internal/lsp/testdata/builtins/builtins.go b/gopls/internal/lsp/testdata/builtins/builtins.go index 75c6e418312..47fa682e8d7 100644 --- a/gopls/internal/lsp/testdata/builtins/builtins.go +++ b/gopls/internal/lsp/testdata/builtins/builtins.go @@ -28,6 +28,8 @@ package builtins /* int8 */ //@item(int8, "int8", "", "type") /* iota */ //@item(iota, "iota", "", "const") /* len(v Type) int */ //@item(len, "len", "func(v Type) int", "func") +/* max(x Type, y ...Type) Type */ //@item(max, "max", "func(x Type, y ...Type) Type", "func") +/* min(y Type, y ...Type) Type */ //@item(min, "min", "func(y Type, y ...Type) Type", "func") /* make(t Type, size ...int) Type */ //@item(make, "make", "func(t Type, size ...int) Type", "func") /* new(Type) *Type */ //@item(new, "new", "func(Type) *Type", "func") /* nil */ //@item(_nil, "nil", "", "var") diff --git a/gopls/internal/lsp/tests/util_go121.go b/gopls/internal/lsp/tests/util_go121.go index 93065864802..c5b2278580b 100644 --- a/gopls/internal/lsp/tests/util_go121.go +++ b/gopls/internal/lsp/tests/util_go121.go @@ -9,4 +9,6 @@ package tests func init() { builtins["clear"] = true + builtins["max"] = true + builtins["min"] = true } From cd694d8db4f10068e4fe38fe7aa1aa8bda65b88b Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 2 May 2023 12:26:56 -0400 Subject: [PATCH 081/109] go/packages: include "unsafe".GoFiles=["unsafe.go"] This change causes the unsafe package's GoFiles list to include unsafe.go, and documents the field more precisely. This file is never compiled, but it is legal Go syntax, and serves as documentation. It is useful for client tools to know where to find it. Also, remove the corresponding workaround in gopls. 
Fixes golang/go#59929 Change-Id: I4ef9f4c16c5b5b74ee7a7c4d1f7eb3736f779b91 Reviewed-on: https://go-review.googlesource.com/c/tools/+/491375 Reviewed-by: Robert Findley Run-TryBot: Alan Donovan Auto-Submit: Alan Donovan Reviewed-by: Michael Matloob TryBot-Result: Gopher Robot --- go/packages/golist.go | 14 +++++----- go/packages/packages.go | 3 +++ go/packages/packages_test.go | 14 ++++++---- gopls/internal/lsp/cache/load.go | 26 ------------------- gopls/internal/lsp/cache/view.go | 4 +-- .../internal/regtest/misc/references_test.go | 2 +- .../regtest/misc/workspace_symbol_test.go | 3 --- 7 files changed, 20 insertions(+), 46 deletions(-) diff --git a/go/packages/golist.go b/go/packages/golist.go index aa2ca3bd684..e84f19dfa98 100644 --- a/go/packages/golist.go +++ b/go/packages/golist.go @@ -625,7 +625,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse } if pkg.PkgPath == "unsafe" { - pkg.GoFiles = nil // ignore fake unsafe.go file + pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) + } else if len(pkg.CompiledGoFiles) == 0 { + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + pkg.CompiledGoFiles = pkg.GoFiles } // Assume go list emits only absolute paths for Dir. @@ -663,13 +668,6 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse response.Roots = append(response.Roots, pkg.ID) } - // Work around for pre-go.1.11 versions of go list. - // TODO(matloob): they should be handled by the fallback. - // Can we delete this? - if len(pkg.CompiledGoFiles) == 0 { - pkg.CompiledGoFiles = pkg.GoFiles - } - // Temporary work-around for golang/go#39986. Parse filenames out of // error messages. This happens if there are unrecoverable syntax // errors in the source, so we can't match on a specific error message. diff --git a/go/packages/packages.go b/go/packages/packages.go index 0f1505b808a..632be722a2b 100644 --- a/go/packages/packages.go +++ b/go/packages/packages.go @@ -308,6 +308,9 @@ type Package struct { TypeErrors []types.Error // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. 
GoFiles []string // CompiledGoFiles lists the absolute file paths of the package's source diff --git a/go/packages/packages_test.go b/go/packages/packages_test.go index d0960c8b521..a89887f171c 100644 --- a/go/packages/packages_test.go +++ b/go/packages/packages_test.go @@ -217,7 +217,7 @@ func testLoadImportsGraph(t *testing.T, exporter packagestest.Exporter) { id string wantName string wantKind string - wantSrcs string + wantSrcs string // = {Go,Other,Embed}Files wantIgnored string }{ {"golang.org/fake/a", "a", "package", "a.go", ""}, @@ -227,7 +227,7 @@ func testLoadImportsGraph(t *testing.T, exporter packagestest.Exporter) { {"container/list", "list", "package", "list.go", ""}, {"golang.org/fake/subdir/d", "d", "package", "d.go", ""}, {"golang.org/fake/subdir/d.test", "main", "command", "0.go", ""}, - {"unsafe", "unsafe", "package", "", ""}, + {"unsafe", "unsafe", "package", "unsafe.go", ""}, } { p, ok := all[test.id] if !ok { @@ -250,10 +250,10 @@ func testLoadImportsGraph(t *testing.T, exporter packagestest.Exporter) { } if srcs := strings.Join(srcs(p), " "); srcs != test.wantSrcs { - t.Errorf("%s.Srcs = [%s], want [%s]", test.id, srcs, test.wantSrcs) + t.Errorf("%s.{Go,Other,Embed}Files = [%s], want [%s]", test.id, srcs, test.wantSrcs) } if ignored := strings.Join(cleanPaths(p.IgnoredFiles), " "); ignored != test.wantIgnored { - t.Errorf("%s.Srcs = [%s], want [%s]", test.id, ignored, test.wantIgnored) + t.Errorf("%s.IgnoredFiles = [%s], want [%s]", test.id, ignored, test.wantIgnored) } } @@ -2788,7 +2788,11 @@ func errorMessages(errors []packages.Error) []string { } func srcs(p *packages.Package) []string { - return cleanPaths(append(append(p.GoFiles[:len(p.GoFiles):len(p.GoFiles)], p.OtherFiles...), p.EmbedFiles...)) + var files []string + files = append(files, p.GoFiles...) + files = append(files, p.OtherFiles...) + files = append(files, p.EmbedFiles...) + return cleanPaths(files) } // cleanPaths attempts to reduce path names to stable forms diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go index 939d084492c..da985a8ea38 100644 --- a/gopls/internal/lsp/cache/load.go +++ b/gopls/internal/lsp/cache/load.go @@ -159,32 +159,6 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadSc return bug.Errorf("internal error: go/packages returned multiple packages for standalone file") } - // Workaround for a bug (?) that has been in go/packages since - // the outset: Package("unsafe").GoFiles=[], whereas it should - // include unsafe/unsafe.go. Derive it from builtins.go. - // - // This workaround relies on the fact that we always add both - // builtins and unsafe to the set of scopes in the workspace load. - // - // TODO(adonovan): fix upstream in go/packages. - // (Does this need a proposal? Arguably not.) 
- { - var builtin, unsafe *packages.Package - for _, pkg := range pkgs { - switch pkg.ID { - case "unsafe": - unsafe = pkg - case "builtin": - builtin = pkg - } - } - if builtin != nil && unsafe != nil && len(builtin.GoFiles) == 1 { - unsafe.GoFiles = []string{ - filepath.Join(filepath.Dir(builtin.GoFiles[0]), "../unsafe/unsafe.go"), - } - } - } - moduleErrs := make(map[string][]packages.Error) // module path -> errors filterFunc := s.view.filterFunc() newMetadata := make(map[PackageID]*source.Metadata) diff --git a/gopls/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go index 1dc13aaee8c..74a07cf5536 100644 --- a/gopls/internal/lsp/cache/view.go +++ b/gopls/internal/lsp/cache/view.go @@ -862,10 +862,8 @@ func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) (loadEr // If we're loading anything, ensure we also load builtin, // since it provides fake definitions (and documentation) // for types like int that are used everywhere. - // ("unsafe" is also needed since its sole GoFiles is - // derived from that of "builtin" via a workaround in load.) if len(scopes) > 0 { - scopes = append(scopes, packageLoadScope("builtin"), packageLoadScope("unsafe")) + scopes = append(scopes, packageLoadScope("builtin")) } loadErr = s.load(ctx, true, scopes...) diff --git a/gopls/internal/regtest/misc/references_test.go b/gopls/internal/regtest/misc/references_test.go index 1e14f1bbbbb..a85bcc27d61 100644 --- a/gopls/internal/regtest/misc/references_test.go +++ b/gopls/internal/regtest/misc/references_test.go @@ -104,7 +104,7 @@ func _() { func TestDefsRefsBuiltins(t *testing.T) { testenv.NeedsGo1Point(t, 17) // for unsafe.{Add,Slice} - // TODO(adonovan): add unsafe.SliceData,String,StringData} in later go versions. + // TODO(adonovan): add unsafe.{SliceData,String,StringData} in later go versions. const files = ` -- go.mod -- module example.com diff --git a/gopls/internal/regtest/misc/workspace_symbol_test.go b/gopls/internal/regtest/misc/workspace_symbol_test.go index 849743b5b10..7b2866e98a9 100644 --- a/gopls/internal/regtest/misc/workspace_symbol_test.go +++ b/gopls/internal/regtest/misc/workspace_symbol_test.go @@ -32,8 +32,6 @@ package exclude const K2 = "exclude.go" ` - // NB: the name K was chosen to avoid spurious - // matches in the always-present "unsafe" package. Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("a.go") checkSymbols(env, "K", "K1") @@ -73,7 +71,6 @@ const ( "Fooex", // shorter than Fooest, FooBar, lexically before Fooey "Fooey", // shorter than Fooest, Foobar "Fooest", - "unsafe.Offsetof", // a very fuzzy match ) }) } From 96844c3594b1a5b71b689f4d38697c012e093011 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 31 May 2023 16:20:00 -0400 Subject: [PATCH 082/109] cmd/{guru,callgraph}: stop using go/pointer This change removes the -algo=pta option from cmd/callgraph, and all the subcommands of cmd/guru, that use pointer analysis. These features have been poorly supported for a long time, and the pointer analysis package is about to be tagged and deleted. 
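
For users who relied on -algo=pta, a whole-program call graph can still be
built in-process with RTA (or VTA). The sketch below is illustrative only
and not part of this patch; the load mode and root selection are
assumptions:

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/tools/go/callgraph"
        "golang.org/x/tools/go/callgraph/rta"
        "golang.org/x/tools/go/packages"
        "golang.org/x/tools/go/ssa"
        "golang.org/x/tools/go/ssa/ssautil"
    )

    func main() {
        cfg := &packages.Config{Mode: packages.LoadAllSyntax}
        pkgs, err := packages.Load(cfg, "./...")
        if err != nil {
            log.Fatal(err)
        }
        prog, _ := ssautil.AllPackages(pkgs, ssa.InstantiateGenerics)
        prog.Build()

        // Like the removed pta mode, RTA needs whole-program roots (main/init).
        var roots []*ssa.Function
        for _, p := range ssautil.MainPackages(prog.AllPackages()) {
            roots = append(roots, p.Func("init"), p.Func("main"))
        }
        cg := rta.Analyze(roots, true).CallGraph
        cg.DeleteSyntheticNodes()

        callgraph.GraphVisitEdges(cg, func(e *callgraph.Edge) error {
            fmt.Printf("%s --> %s\n", e.Caller.Func, e.Callee.Func)
            return nil
        })
    }

The same result is available from the command line via callgraph -algo=rta
(or -algo=vta), as the updated usage text in this patch describes.
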
Updates golang/go#59676 Change-Id: Id4ded651b8385c588991d01377b2f087d14ae191 Reviewed-on: https://go-review.googlesource.com/c/tools/+/499696 gopls-CI: kokoro Reviewed-by: Bryan Mills Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot --- cmd/callgraph/main.go | 51 +-- cmd/callgraph/main_test.go | 16 - cmd/guru/callees.go | 257 -------------- cmd/guru/callers.go | 194 ----------- cmd/guru/callstack.go | 140 -------- cmd/guru/guru.go | 68 +--- cmd/guru/guru_test.go | 20 +- cmd/guru/implements.go | 7 +- cmd/guru/main.go | 39 +-- cmd/guru/peers.go | 252 -------------- cmd/guru/pointsto.go | 287 --------------- cmd/guru/serial/serial.go | 6 - cmd/guru/testdata/src/calls-json/main.go | 16 - cmd/guru/testdata/src/calls-json/main.golden | 28 -- cmd/guru/testdata/src/calls/main.go | 129 ------- cmd/guru/testdata/src/calls/main.golden | 125 ------- cmd/guru/testdata/src/imports/main.go | 2 +- cmd/guru/testdata/src/imports/main.golden | 4 - cmd/guru/testdata/src/peers-json/main.go | 13 - cmd/guru/testdata/src/peers-json/main.golden | 12 - cmd/guru/testdata/src/peers/main.go | 52 --- cmd/guru/testdata/src/peers/main.golden | 100 ------ cmd/guru/testdata/src/pointsto-json/main.go | 27 -- .../testdata/src/pointsto-json/main.golden | 29 -- cmd/guru/testdata/src/pointsto/main.go | 75 ---- cmd/guru/testdata/src/pointsto/main.golden | 96 ----- cmd/guru/testdata/src/reflection/main.go | 30 -- cmd/guru/testdata/src/reflection/main.golden | 34 -- cmd/guru/testdata/src/softerrs/main.go | 15 - cmd/guru/testdata/src/softerrs/main.golden | 8 - cmd/guru/testdata/src/what-json/main.golden | 11 +- cmd/guru/testdata/src/what/main.golden | 8 +- cmd/guru/testdata/src/whicherrs/main.go | 32 -- cmd/guru/testdata/src/whicherrs/main.golden | 11 - cmd/guru/what.go | 40 +-- cmd/guru/whicherrs.go | 327 ------------------ go/callgraph/callgraph_test.go | 16 - 37 files changed, 24 insertions(+), 2553 deletions(-) delete mode 100644 cmd/guru/callees.go delete mode 100644 cmd/guru/callers.go delete mode 100644 cmd/guru/callstack.go delete mode 100644 cmd/guru/peers.go delete mode 100644 cmd/guru/pointsto.go delete mode 100644 cmd/guru/testdata/src/calls-json/main.go delete mode 100644 cmd/guru/testdata/src/calls-json/main.golden delete mode 100644 cmd/guru/testdata/src/calls/main.go delete mode 100644 cmd/guru/testdata/src/calls/main.golden delete mode 100644 cmd/guru/testdata/src/peers-json/main.go delete mode 100644 cmd/guru/testdata/src/peers-json/main.golden delete mode 100644 cmd/guru/testdata/src/peers/main.go delete mode 100644 cmd/guru/testdata/src/peers/main.golden delete mode 100644 cmd/guru/testdata/src/pointsto-json/main.go delete mode 100644 cmd/guru/testdata/src/pointsto-json/main.golden delete mode 100644 cmd/guru/testdata/src/pointsto/main.go delete mode 100644 cmd/guru/testdata/src/pointsto/main.golden delete mode 100644 cmd/guru/testdata/src/reflection/main.go delete mode 100644 cmd/guru/testdata/src/reflection/main.golden delete mode 100644 cmd/guru/testdata/src/softerrs/main.go delete mode 100644 cmd/guru/testdata/src/softerrs/main.golden delete mode 100644 cmd/guru/testdata/src/whicherrs/main.go delete mode 100644 cmd/guru/testdata/src/whicherrs/main.golden delete mode 100644 cmd/guru/whicherrs.go diff --git a/cmd/callgraph/main.go b/cmd/callgraph/main.go index eb8c0d1163d..33f7dfa8098 100644 --- a/cmd/callgraph/main.go +++ b/cmd/callgraph/main.go @@ -20,14 +20,12 @@ package main // import "golang.org/x/tools/cmd/callgraph" // callee file/line/col import ( - "bufio" "bytes" "flag" "fmt" "go/build" 
"go/token" "io" - "log" "os" "runtime" "text/template" @@ -39,7 +37,6 @@ import ( "golang.org/x/tools/go/callgraph/static" "golang.org/x/tools/go/callgraph/vta" "golang.org/x/tools/go/packages" - "golang.org/x/tools/go/pointer" "golang.org/x/tools/go/ssa" "golang.org/x/tools/go/ssa/ssautil" ) @@ -47,7 +44,7 @@ import ( // flags var ( algoFlag = flag.String("algo", "rta", - `Call graph construction algorithm (static, cha, rta, vta, pta)`) + `Call graph construction algorithm (static, cha, rta, vta)`) testFlag = flag.Bool("test", false, "Loads test code (*_test.go) for imported packages") @@ -55,9 +52,6 @@ var ( formatFlag = flag.String("format", "{{.Caller}}\t--{{.Dynamic}}-{{.Line}}:{{.Column}}-->\t{{.Callee}}", "A template expression specifying how to format an edge") - - ptalogFlag = flag.String("ptalog", "", - "Location of the points-to analysis log file, or empty to disable logging.") ) func init() { @@ -68,7 +62,7 @@ const Usage = `callgraph: display the call graph of a Go program. Usage: - callgraph [-algo=static|cha|rta|vta|pta] [-test] [-format=...] package... + callgraph [-algo=static|cha|rta|vta] [-test] [-format=...] package... Flags: @@ -78,11 +72,10 @@ Flags: cha Class Hierarchy Analysis rta Rapid Type Analysis vta Variable Type Analysis - pta inclusion-based Points-To Analysis The algorithms are ordered by increasing precision in their treatment of dynamic calls (and thus also computational cost). - RTA and PTA require a whole program (main or test), and + RTA requires a whole program (main or test), and include only functions reachable from main. -test Include the package's tests in the analysis. @@ -132,9 +125,9 @@ Examples: $GOROOT/src/net/http/triv.go | sort | uniq Show functions that make dynamic calls into the 'fmt' test package, - using the pointer analysis algorithm: + using the Rapid Type Analysis algorithm: - callgraph -format='{{.Caller}} -{{.Dynamic}}-> {{.Callee}}' -test -algo=pta fmt | + callgraph -format='{{.Caller}} -{{.Dynamic}}-> {{.Callee}}' -test -algo=rta fmt | sed -ne 's/-dynamic-/--/p' | sed -ne 's/-->.*fmt_test.*$//p' | sort | uniq @@ -205,39 +198,7 @@ func doCallgraph(dir, gopath, algo, format string, tests bool, args []string) er cg = cha.CallGraph(prog) case "pta": - // Set up points-to analysis log file. - var ptalog io.Writer - if *ptalogFlag != "" { - if f, err := os.Create(*ptalogFlag); err != nil { - log.Fatalf("Failed to create PTA log file: %s", err) - } else { - buf := bufio.NewWriter(f) - ptalog = buf - defer func() { - if err := buf.Flush(); err != nil { - log.Printf("flush: %s", err) - } - if err := f.Close(); err != nil { - log.Printf("close: %s", err) - } - }() - } - } - - mains, err := mainPackages(pkgs) - if err != nil { - return err - } - config := &pointer.Config{ - Mains: mains, - BuildCallGraph: true, - Log: ptalog, - } - ptares, err := pointer.Analyze(config) - if err != nil { - return err // internal error in pointer analysis - } - cg = ptares.CallGraph + return fmt.Errorf("pointer analysis is no longer supported (see Go issue #59676)") case "rta": mains, err := mainPackages(pkgs) diff --git a/cmd/callgraph/main_test.go b/cmd/callgraph/main_test.go index c8bee87e2b9..afcb7a967df 100644 --- a/cmd/callgraph/main_test.go +++ b/cmd/callgraph/main_test.go @@ -65,14 +65,6 @@ func TestCallgraph(t *testing.T) { "pkg.main --> pkg.main2", "pkg.main2 --> (pkg.D).f", }}, - {"pta", false, []string{ - // pta distinguishes main->C, main2->D. Also has a root node. 
- ` --> pkg.init`, - ` --> pkg.main`, - `pkg.main --> (pkg.C).f`, - `pkg.main --> pkg.main2`, - `pkg.main2 --> (pkg.D).f`, - }}, // tests: both the package's main and the test's main are called. // The callgraph includes all the guts of the "testing" package. {"rta", true, []string{ @@ -87,14 +79,6 @@ func TestCallgraph(t *testing.T) { `pkg.Example --> (pkg.C).f`, `pkg.main --> (pkg.C).f`, }}, - {"pta", true, []string{ - ` --> pkg.test.main`, - ` --> pkg.main`, - `pkg.test.main --> testing.MainStart`, - `testing.runExample --> pkg.Example`, - `pkg.Example --> (pkg.C).f`, - `pkg.main --> (pkg.C).f`, - }}, } { const format = "{{.Caller}} --> {{.Callee}}" stdout = new(bytes.Buffer) diff --git a/cmd/guru/callees.go b/cmd/guru/callees.go deleted file mode 100644 index 597895770ae..00000000000 --- a/cmd/guru/callees.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "sort" - - "golang.org/x/tools/cmd/guru/serial" - "golang.org/x/tools/go/loader" - "golang.org/x/tools/go/pointer" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/ssa/ssautil" -) - -// The callees function reports the possible callees of the function call site -// identified by the specified source location. -func callees(q *Query) error { - lconf := loader.Config{Build: q.Build} - - if err := setPTAScope(&lconf, q.Scope); err != nil { - return err - } - - // Load/parse/type-check the program. - lprog, err := loadWithSoftErrors(&lconf) - if err != nil { - return err - } - - qpos, err := parseQueryPos(lprog, q.Pos, true) // needs exact pos - if err != nil { - return err - } - - // Determine the enclosing call for the specified position. - var e *ast.CallExpr - for _, n := range qpos.path { - if e, _ = n.(*ast.CallExpr); e != nil { - break - } - } - if e == nil { - return fmt.Errorf("there is no function call here") - } - // TODO(adonovan): issue an error if the call is "too far - // away" from the current selection, as this most likely is - // not what the user intended. - - // Reject type conversions. - if qpos.info.Types[e.Fun].IsType() { - return fmt.Errorf("this is a type conversion, not a function call") - } - - // Deal with obviously static calls before constructing SSA form. - // Some static calls may yet require SSA construction, - // e.g. f := func(){}; f(). - switch funexpr := unparen(e.Fun).(type) { - case *ast.Ident: - switch obj := qpos.info.Uses[funexpr].(type) { - case *types.Builtin: - // Reject calls to built-ins. - return fmt.Errorf("this is a call to the built-in '%s' operator", obj.Name()) - case *types.Func: - // This is a static function call - q.Output(lprog.Fset, &calleesTypesResult{ - site: e, - callee: obj, - }) - return nil - } - case *ast.SelectorExpr: - sel := qpos.info.Selections[funexpr] - if sel == nil { - // qualified identifier. - // May refer to top level function variable - // or to top level function. - callee := qpos.info.Uses[funexpr.Sel] - if obj, ok := callee.(*types.Func); ok { - q.Output(lprog.Fset, &calleesTypesResult{ - site: e, - callee: obj, - }) - return nil - } - } else if sel.Kind() == types.MethodVal { - // Inspect the receiver type of the selected method. - // If it is concrete, the call is statically dispatched. - // (Due to implicit field selections, it is not enough to look - // at sel.Recv(), the type of the actual receiver expression.) 
- method := sel.Obj().(*types.Func) - recvtype := method.Type().(*types.Signature).Recv().Type() - if !types.IsInterface(recvtype) { - // static method call - q.Output(lprog.Fset, &calleesTypesResult{ - site: e, - callee: method, - }) - return nil - } - } - } - - prog := ssautil.CreateProgram(lprog, ssa.GlobalDebug) - - ptaConfig, err := setupPTA(prog, lprog, q.PTALog, q.Reflection) - if err != nil { - return err - } - - pkg := prog.Package(qpos.info.Pkg) - if pkg == nil { - return fmt.Errorf("no SSA package") - } - - // Defer SSA construction till after errors are reported. - prog.Build() - - // Ascertain calling function and call site. - callerFn := ssa.EnclosingFunction(pkg, qpos.path) - if callerFn == nil { - return fmt.Errorf("no SSA function built for this location (dead code?)") - } - - // Find the call site. - site, err := findCallSite(callerFn, e) - if err != nil { - return err - } - - funcs, err := findCallees(ptaConfig, site) - if err != nil { - return err - } - - q.Output(lprog.Fset, &calleesSSAResult{ - site: site, - funcs: funcs, - }) - return nil -} - -func findCallSite(fn *ssa.Function, call *ast.CallExpr) (ssa.CallInstruction, error) { - instr, _ := fn.ValueForExpr(call) - callInstr, _ := instr.(ssa.CallInstruction) - if instr == nil { - return nil, fmt.Errorf("this call site is unreachable in this analysis") - } - return callInstr, nil -} - -func findCallees(conf *pointer.Config, site ssa.CallInstruction) ([]*ssa.Function, error) { - // Avoid running the pointer analysis for static calls. - if callee := site.Common().StaticCallee(); callee != nil { - switch callee.String() { - case "runtime.SetFinalizer", "(reflect.Value).Call": - // The PTA treats calls to these intrinsics as dynamic. - // TODO(adonovan): avoid reliance on PTA internals. - - default: - return []*ssa.Function{callee}, nil // singleton - } - } - - // Dynamic call: use pointer analysis. - conf.BuildCallGraph = true - cg := ptrAnalysis(conf).CallGraph - cg.DeleteSyntheticNodes() - - // Find all call edges from the site. - n := cg.Nodes[site.Parent()] - if n == nil { - return nil, fmt.Errorf("this call site is unreachable in this analysis") - } - calleesMap := make(map[*ssa.Function]bool) - for _, edge := range n.Out { - if edge.Site == site { - calleesMap[edge.Callee.Func] = true - } - } - - // De-duplicate and sort. 
- funcs := make([]*ssa.Function, 0, len(calleesMap)) - for f := range calleesMap { - funcs = append(funcs, f) - } - sort.Sort(byFuncPos(funcs)) - return funcs, nil -} - -type calleesSSAResult struct { - site ssa.CallInstruction - funcs []*ssa.Function -} - -type calleesTypesResult struct { - site *ast.CallExpr - callee *types.Func -} - -func (r *calleesSSAResult) PrintPlain(printf printfFunc) { - if len(r.funcs) == 0 { - // dynamic call on a provably nil func/interface - printf(r.site, "%s on nil value", r.site.Common().Description()) - } else { - printf(r.site, "this %s dispatches to:", r.site.Common().Description()) - for _, callee := range r.funcs { - printf(callee, "\t%s", callee) - } - } -} - -func (r *calleesSSAResult) JSON(fset *token.FileSet) []byte { - j := &serial.Callees{ - Pos: fset.Position(r.site.Pos()).String(), - Desc: r.site.Common().Description(), - } - for _, callee := range r.funcs { - j.Callees = append(j.Callees, &serial.Callee{ - Name: callee.String(), - Pos: fset.Position(callee.Pos()).String(), - }) - } - return toJSON(j) -} - -func (r *calleesTypesResult) PrintPlain(printf printfFunc) { - printf(r.site, "this static function call dispatches to:") - printf(r.callee, "\t%s", r.callee.FullName()) -} - -func (r *calleesTypesResult) JSON(fset *token.FileSet) []byte { - j := &serial.Callees{ - Pos: fset.Position(r.site.Pos()).String(), - Desc: "static function call", - } - j.Callees = []*serial.Callee{ - { - Name: r.callee.FullName(), - Pos: fset.Position(r.callee.Pos()).String(), - }, - } - return toJSON(j) -} - -// NB: byFuncPos is not deterministic across packages since it depends on load order. -// Use lessPos if the tests need it. -type byFuncPos []*ssa.Function - -func (a byFuncPos) Len() int { return len(a) } -func (a byFuncPos) Less(i, j int) bool { return a[i].Pos() < a[j].Pos() } -func (a byFuncPos) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/cmd/guru/callers.go b/cmd/guru/callers.go deleted file mode 100644 index 8afefba338e..00000000000 --- a/cmd/guru/callers.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "go/token" - "go/types" - - "golang.org/x/tools/cmd/guru/serial" - "golang.org/x/tools/go/callgraph" - "golang.org/x/tools/go/loader" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/ssa/ssautil" -) - -// The callers function reports the possible callers of the function -// immediately enclosing the specified source location. -func callers(q *Query) error { - lconf := loader.Config{Build: q.Build} - - if err := setPTAScope(&lconf, q.Scope); err != nil { - return err - } - - // Load/parse/type-check the program. - lprog, err := loadWithSoftErrors(&lconf) - if err != nil { - return err - } - - qpos, err := parseQueryPos(lprog, q.Pos, false) - if err != nil { - return err - } - - prog := ssautil.CreateProgram(lprog, 0) - - ptaConfig, err := setupPTA(prog, lprog, q.PTALog, q.Reflection) - if err != nil { - return err - } - - pkg := prog.Package(qpos.info.Pkg) - if pkg == nil { - return fmt.Errorf("no SSA package") - } - if !ssa.HasEnclosingFunction(pkg, qpos.path) { - return fmt.Errorf("this position is not inside a function") - } - - // Defer SSA construction till after errors are reported. 
- prog.Build() - - target := ssa.EnclosingFunction(pkg, qpos.path) - if target == nil { - return fmt.Errorf("no SSA function built for this location (dead code?)") - } - - // If the function is never address-taken, all calls are direct - // and can be found quickly by inspecting the whole SSA program. - cg := directCallsTo(target, entryPoints(ptaConfig.Mains)) - if cg == nil { - // Run the pointer analysis, recording each - // call found to originate from target. - // (Pointer analysis may return fewer results than - // directCallsTo because it ignores dead code.) - ptaConfig.BuildCallGraph = true - cg = ptrAnalysis(ptaConfig).CallGraph - } - cg.DeleteSyntheticNodes() - edges := cg.CreateNode(target).In - - // TODO(adonovan): sort + dedup calls to ensure test determinism. - - q.Output(lprog.Fset, &callersResult{ - target: target, - callgraph: cg, - edges: edges, - }) - return nil -} - -// directCallsTo inspects the whole program and returns a callgraph -// containing edges for all direct calls to the target function. -// directCallsTo returns nil if the function is ever address-taken. -func directCallsTo(target *ssa.Function, entrypoints []*ssa.Function) *callgraph.Graph { - cg := callgraph.New(nil) // use nil as root *Function - targetNode := cg.CreateNode(target) - - // Is the function a program entry point? - // If so, add edge from callgraph root. - for _, f := range entrypoints { - if f == target { - callgraph.AddEdge(cg.Root, nil, targetNode) - } - } - - // Find receiver type (for methods). - var recvType types.Type - if recv := target.Signature.Recv(); recv != nil { - recvType = recv.Type() - } - - // Find all direct calls to function, - // or a place where its address is taken. - var space [32]*ssa.Value // preallocate - for fn := range ssautil.AllFunctions(target.Prog) { - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - // Is this a method (T).f of a concrete type T - // whose runtime type descriptor is address-taken? - // (To be fully sound, we would have to check that - // the type doesn't make it to reflection as a - // subelement of some other address-taken type.) - if recvType != nil { - if mi, ok := instr.(*ssa.MakeInterface); ok { - if types.Identical(mi.X.Type(), recvType) { - return nil // T is address-taken - } - if ptr, ok := mi.X.Type().(*types.Pointer); ok && - types.Identical(ptr.Elem(), recvType) { - return nil // *T is address-taken - } - } - } - - // Direct call to target? - rands := instr.Operands(space[:0]) - if site, ok := instr.(ssa.CallInstruction); ok && - site.Common().Value == target { - callgraph.AddEdge(cg.CreateNode(fn), site, targetNode) - rands = rands[1:] // skip .Value (rands[0]) - } - - // Address-taken? 
- for _, rand := range rands { - if rand != nil && *rand == target { - return nil - } - } - } - } - } - - return cg -} - -func entryPoints(mains []*ssa.Package) []*ssa.Function { - var entrypoints []*ssa.Function - for _, pkg := range mains { - entrypoints = append(entrypoints, pkg.Func("init")) - if main := pkg.Func("main"); main != nil && pkg.Pkg.Name() == "main" { - entrypoints = append(entrypoints, main) - } - } - return entrypoints -} - -type callersResult struct { - target *ssa.Function - callgraph *callgraph.Graph - edges []*callgraph.Edge -} - -func (r *callersResult) PrintPlain(printf printfFunc) { - root := r.callgraph.Root - if r.edges == nil { - printf(r.target, "%s is not reachable in this program.", r.target) - } else { - printf(r.target, "%s is called from these %d sites:", r.target, len(r.edges)) - for _, edge := range r.edges { - if edge.Caller == root { - printf(r.target, "the root of the call graph") - } else { - printf(edge, "\t%s from %s", edge.Description(), edge.Caller.Func) - } - } - } -} - -func (r *callersResult) JSON(fset *token.FileSet) []byte { - var callers []serial.Caller - for _, edge := range r.edges { - callers = append(callers, serial.Caller{ - Caller: edge.Caller.Func.String(), - Pos: fset.Position(edge.Pos()).String(), - Desc: edge.Description(), - }) - } - return toJSON(callers) -} diff --git a/cmd/guru/callstack.go b/cmd/guru/callstack.go deleted file mode 100644 index c3d6d6ee75a..00000000000 --- a/cmd/guru/callstack.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "go/token" - - "golang.org/x/tools/cmd/guru/serial" - "golang.org/x/tools/go/callgraph" - "golang.org/x/tools/go/callgraph/static" - "golang.org/x/tools/go/loader" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/ssa/ssautil" -) - -// The callstack function displays an arbitrary path from a root of the callgraph -// to the function at the current position. -// -// The information may be misleading in a context-insensitive -// analysis. e.g. the call path X->Y->Z might be infeasible if Y never -// calls Z when it is called from X. TODO(adonovan): think about UI. -// -// TODO(adonovan): permit user to specify a starting point other than -// the analysis root. -func callstack(q *Query) error { - fset := token.NewFileSet() - lconf := loader.Config{Fset: fset, Build: q.Build} - - if err := setPTAScope(&lconf, q.Scope); err != nil { - return err - } - - // Load/parse/type-check the program. - lprog, err := loadWithSoftErrors(&lconf) - if err != nil { - return err - } - - qpos, err := parseQueryPos(lprog, q.Pos, false) - if err != nil { - return err - } - - prog := ssautil.CreateProgram(lprog, 0) - - ptaConfig, err := setupPTA(prog, lprog, q.PTALog, q.Reflection) - if err != nil { - return err - } - - pkg := prog.Package(qpos.info.Pkg) - if pkg == nil { - return fmt.Errorf("no SSA package") - } - - if !ssa.HasEnclosingFunction(pkg, qpos.path) { - return fmt.Errorf("this position is not inside a function") - } - - // Defer SSA construction till after errors are reported. 
- prog.Build() - - target := ssa.EnclosingFunction(pkg, qpos.path) - if target == nil { - return fmt.Errorf("no SSA function built for this location (dead code?)") - } - - var callpath []*callgraph.Edge - isEnd := func(n *callgraph.Node) bool { return n.Func == target } - - // First, build a callgraph containing only static call edges, - // and search for an arbitrary path from a root to the target function. - // This is quick, and the user wants a static path if one exists. - cg := static.CallGraph(prog) - cg.DeleteSyntheticNodes() - for _, ep := range entryPoints(ptaConfig.Mains) { - callpath = callgraph.PathSearch(cg.CreateNode(ep), isEnd) - if callpath != nil { - break - } - } - - // No fully static path found. - // Run the pointer analysis and build a complete call graph. - if callpath == nil { - ptaConfig.BuildCallGraph = true - cg := ptrAnalysis(ptaConfig).CallGraph - cg.DeleteSyntheticNodes() - callpath = callgraph.PathSearch(cg.Root, isEnd) - if callpath != nil { - callpath = callpath[1:] // remove synthetic edge from - } - } - - q.Output(fset, &callstackResult{ - qpos: qpos, - target: target, - callpath: callpath, - }) - return nil -} - -type callstackResult struct { - qpos *queryPos - target *ssa.Function - callpath []*callgraph.Edge -} - -func (r *callstackResult) PrintPlain(printf printfFunc) { - if r.callpath != nil { - printf(r.qpos, "Found a call path from root to %s", r.target) - printf(r.target, "%s", r.target) - for i := len(r.callpath) - 1; i >= 0; i-- { - edge := r.callpath[i] - printf(edge, "%s from %s", edge.Description(), edge.Caller.Func) - } - } else { - printf(r.target, "%s is unreachable in this analysis scope", r.target) - } -} - -func (r *callstackResult) JSON(fset *token.FileSet) []byte { - var callers []serial.Caller - for i := len(r.callpath) - 1; i >= 0; i-- { // (innermost first) - edge := r.callpath[i] - callers = append(callers, serial.Caller{ - Pos: fset.Position(edge.Pos()).String(), - Caller: edge.Caller.Func.String(), - Desc: edge.Description(), - }) - } - return toJSON(&serial.CallStack{ - Pos: fset.Position(r.target.Pos()).String(), - Target: r.target.String(), - Callers: callers, - }) -} diff --git a/cmd/guru/guru.go b/cmd/guru/guru.go index f8e6cfaa826..575136cf3d8 100644 --- a/cmd/guru/guru.go +++ b/cmd/guru/guru.go @@ -24,10 +24,7 @@ import ( "strings" "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/buildutil" "golang.org/x/tools/go/loader" - "golang.org/x/tools/go/pointer" - "golang.org/x/tools/go/ssa" ) type printfFunc func(pos interface{}, format string, args ...interface{}) @@ -70,11 +67,6 @@ type Query struct { Pos string // query position Build *build.Context // package loading configuration - // pointer analysis options - Scope []string // main packages in (*loader.Config).FromArgs syntax - PTALog io.Writer // (optional) pointer-analysis log file - Reflection bool // model reflection soundly (currently slow). - // result-printing function, safe for concurrent use Output func(*token.FileSet, QueryResult) } @@ -82,18 +74,6 @@ type Query struct { // Run runs an guru query and populates its Fset and Result. 
func Run(mode string, q *Query) error { switch mode { - case "callees": - return callees(q) - case "callers": - return callers(q) - case "callstack": - return callstack(q) - case "peers": - return peers(q) - case "pointsto": - return pointsto(q) - case "whicherrs": - return whicherrs(q) case "definition": return definition(q) case "describe": @@ -106,46 +86,13 @@ func Run(mode string, q *Query) error { return referrers(q) case "what": return what(q) + case "callees", "callers", "pointsto", "whicherrs", "callstack", "peers": + return fmt.Errorf("mode %q is no longer supported (see Go issue #59676)", mode) default: return fmt.Errorf("invalid mode: %q", mode) } } -func setPTAScope(lconf *loader.Config, scope []string) error { - pkgs := buildutil.ExpandPatterns(lconf.Build, scope) - if len(pkgs) == 0 { - return fmt.Errorf("no packages specified for pointer analysis scope") - } - // The value of each entry in pkgs is true, - // giving ImportWithTests (not Import) semantics. - lconf.ImportPkgs = pkgs - return nil -} - -// Create a pointer.Config whose scope is the initial packages of lprog -// and their dependencies. -func setupPTA(prog *ssa.Program, lprog *loader.Program, ptaLog io.Writer, reflection bool) (*pointer.Config, error) { - // For each initial package (specified on the command line), - // analyze the package if it has a main function. - var mains []*ssa.Package - for _, info := range lprog.InitialPackages() { - p := prog.Package(info.Pkg) - - // Add package to the pointer analysis scope. - if p.Pkg.Name() == "main" && p.Func("main") != nil { - mains = append(mains, p) - } - } - if mains == nil { - return nil, fmt.Errorf("analysis scope has no main and no tests") - } - return &pointer.Config{ - Log: ptaLog, - Reflection: reflection, - Mains: mains, - }, nil -} - // importQueryPackage finds the package P containing the // query position and tells conf to import it. // It returns the package's path. @@ -307,15 +254,6 @@ func allowErrors(lconf *loader.Config) { lconf.TypeChecker.Error = func(err error) {} } -// ptrAnalysis runs the pointer analysis and returns its result. -func ptrAnalysis(conf *pointer.Config) *pointer.Result { - result, err := pointer.Analyze(conf) - if err != nil { - panic(err) // pointer analysis internal error - } - return result -} - func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } // deref returns a pointer's element type; otherwise it returns typ. @@ -333,7 +271,7 @@ func deref(typ types.Type) types.Type { // - a token.Pos, denoting a position // - an ast.Node, denoting an interval // - anything with a Pos() method: -// ssa.Member, ssa.Value, ssa.Instruction, types.Object, pointer.Label, etc. +// ssa.Member, ssa.Value, ssa.Instruction, types.Object, etc. // - a QueryPos, denoting the extent of the user's query. // - nil, meaning no position at all. 
// diff --git a/cmd/guru/guru_test.go b/cmd/guru/guru_test.go index 44ec2ca7279..905a9e2cf49 100644 --- a/cmd/guru/guru_test.go +++ b/cmd/guru/guru_test.go @@ -172,7 +172,6 @@ func doQuery(out io.Writer, q *query, json bool) { var buildContext = build.Default buildContext.GOPATH = "testdata" - pkg := filepath.Dir(strings.TrimPrefix(q.filename, "testdata/src/")) gopathAbs, _ := filepath.Abs(buildContext.GOPATH) @@ -195,11 +194,9 @@ func doQuery(out io.Writer, q *query, json bool) { } query := guru.Query{ - Pos: q.queryPos, - Build: &buildContext, - Scope: []string{pkg}, - Reflection: true, - Output: outputFn, + Pos: q.queryPos, + Build: &buildContext, + Output: outputFn, } if err := guru.Run(q.verb, &query); err != nil { @@ -243,28 +240,17 @@ func TestGuru(t *testing.T) { for _, filename := range []string{ "testdata/src/alias/alias.go", - "testdata/src/calls/main.go", "testdata/src/describe/main.go", "testdata/src/freevars/main.go", "testdata/src/implements/main.go", "testdata/src/implements-methods/main.go", "testdata/src/imports/main.go", - "testdata/src/peers/main.go", - "testdata/src/pointsto/main.go", "testdata/src/referrers/main.go", - "testdata/src/reflection/main.go", "testdata/src/what/main.go", - "testdata/src/whicherrs/main.go", - "testdata/src/softerrs/main.go", - // JSON: - // TODO(adonovan): most of these are very similar; combine them. - "testdata/src/calls-json/main.go", - "testdata/src/peers-json/main.go", "testdata/src/definition-json/main.go", "testdata/src/describe-json/main.go", "testdata/src/implements-json/main.go", "testdata/src/implements-methods-json/main.go", - "testdata/src/pointsto-json/main.go", "testdata/src/referrers-json/main.go", "testdata/src/what-json/main.go", } { diff --git a/cmd/guru/implements.go b/cmd/guru/implements.go index 527e88bd778..9e4d0dba6ee 100644 --- a/cmd/guru/implements.go +++ b/cmd/guru/implements.go @@ -34,12 +34,7 @@ func implements(q *Query) error { } // Set the packages to search. - if len(q.Scope) > 0 { - // Inspect all packages in the analysis scope, if specified. - if err := setPTAScope(&lconf, q.Scope); err != nil { - return err - } - } else { + { // Otherwise inspect the forward and reverse // transitive closure of the selected package. // (In theory even this is incomplete.) diff --git a/cmd/guru/main.go b/cmd/guru/main.go index 7ad083e4590..283b1db7a64 100644 --- a/cmd/guru/main.go +++ b/cmd/guru/main.go @@ -10,18 +10,15 @@ package main // import "golang.org/x/tools/cmd/guru" import ( - "bufio" "flag" "fmt" "go/build" "go/token" - "io" "log" "os" "path/filepath" "runtime" "runtime/pprof" - "strings" "sync" "golang.org/x/tools/go/buildutil" @@ -64,10 +61,8 @@ The mode argument determines the query to perform: freevars show free variables of selection implements show 'implements' relation for selected type or method peers show send/receive corresponding to selected channel op - pointsto show variables the selected pointer may point to referrers show all refs to entity denoted by selected identifier what show basic information about the selected syntax node - whicherrs show possible values of the selected error variable The position argument specifies the filename and byte offset (or range) of the syntax element to query. For example: @@ -137,25 +132,6 @@ func main() { os.Exit(2) } - // Set up points-to analysis log file. 
- var ptalog io.Writer - if *ptalogFlag != "" { - if f, err := os.Create(*ptalogFlag); err != nil { - log.Fatalf("Failed to create PTA log file: %s", err) - } else { - buf := bufio.NewWriter(f) - ptalog = buf - defer func() { - if err := buf.Flush(); err != nil { - log.Printf("flush: %s", err) - } - if err := f.Close(); err != nil { - log.Printf("close: %s", err) - } - }() - } - } - // Profiling support. if *cpuprofileFlag != "" { f, err := os.Create(*cpuprofileFlag) @@ -202,20 +178,11 @@ func main() { } } - // Avoid corner case of split(""). - var scope []string - if *scopeFlag != "" { - scope = strings.Split(*scopeFlag, ",") - } - // Ask the guru. query := Query{ - Pos: posn, - Build: ctxt, - Scope: scope, - PTALog: ptalog, - Reflection: *reflectFlag, - Output: output, + Pos: posn, + Build: ctxt, + Output: output, } if err := Run(mode, &query); err != nil { diff --git a/cmd/guru/peers.go b/cmd/guru/peers.go deleted file mode 100644 index 6e138bf06f8..00000000000 --- a/cmd/guru/peers.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "sort" - - "golang.org/x/tools/cmd/guru/serial" - "golang.org/x/tools/go/loader" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/ssa/ssautil" -) - -// peers enumerates, for a given channel send (or receive) operation, -// the set of possible receives (or sends) that correspond to it. -// -// TODO(adonovan): support reflect.{Select,Recv,Send,Close}. -// TODO(adonovan): permit the user to query based on a MakeChan (not send/recv), -// or the implicit receive in "for v := range ch". -func peers(q *Query) error { - lconf := loader.Config{Build: q.Build} - - if err := setPTAScope(&lconf, q.Scope); err != nil { - return err - } - - // Load/parse/type-check the program. - lprog, err := loadWithSoftErrors(&lconf) - if err != nil { - return err - } - - qpos, err := parseQueryPos(lprog, q.Pos, false) - if err != nil { - return err - } - - prog := ssautil.CreateProgram(lprog, ssa.GlobalDebug) - - ptaConfig, err := setupPTA(prog, lprog, q.PTALog, q.Reflection) - if err != nil { - return err - } - - opPos := findOp(qpos) - if opPos == token.NoPos { - return fmt.Errorf("there is no channel operation here") - } - - // Defer SSA construction till after errors are reported. - prog.Build() - - var queryOp chanOp // the originating send or receive operation - var ops []chanOp // all sends/receives of opposite direction - - // Look at all channel operations in the whole ssa.Program. - // Build a list of those of same type as the query. - allFuncs := ssautil.AllFunctions(prog) - for fn := range allFuncs { - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - for _, op := range chanOps(instr) { - ops = append(ops, op) - if op.pos == opPos { - queryOp = op // we found the query op - } - } - } - } - } - if queryOp.ch == nil { - return fmt.Errorf("ssa.Instruction for send/receive not found") - } - - // Discard operations of wrong channel element type. - // Build set of channel ssa.Values as query to pointer analysis. - // We compare channels by element types, not channel types, to - // ignore both directionality and type names. 
- queryType := queryOp.ch.Type() - queryElemType := queryType.Underlying().(*types.Chan).Elem() - ptaConfig.AddQuery(queryOp.ch) - i := 0 - for _, op := range ops { - if types.Identical(op.ch.Type().Underlying().(*types.Chan).Elem(), queryElemType) { - ptaConfig.AddQuery(op.ch) - ops[i] = op - i++ - } - } - ops = ops[:i] - - // Run the pointer analysis. - ptares := ptrAnalysis(ptaConfig) - - // Find the points-to set. - queryChanPtr := ptares.Queries[queryOp.ch] - - // Ascertain which make(chan) labels the query's channel can alias. - var makes []token.Pos - for _, label := range queryChanPtr.PointsTo().Labels() { - makes = append(makes, label.Pos()) - } - sort.Sort(byPos(makes)) - - // Ascertain which channel operations can alias the same make(chan) labels. - var sends, receives, closes []token.Pos - for _, op := range ops { - if ptr, ok := ptares.Queries[op.ch]; ok && ptr.MayAlias(queryChanPtr) { - switch op.dir { - case types.SendOnly: - sends = append(sends, op.pos) - case types.RecvOnly: - receives = append(receives, op.pos) - case types.SendRecv: - closes = append(closes, op.pos) - } - } - } - sort.Sort(byPos(sends)) - sort.Sort(byPos(receives)) - sort.Sort(byPos(closes)) - - q.Output(lprog.Fset, &peersResult{ - queryPos: opPos, - queryType: queryType, - makes: makes, - sends: sends, - receives: receives, - closes: closes, - }) - return nil -} - -// findOp returns the position of the enclosing send/receive/close op. -// For send and receive operations, this is the position of the <- token; -// for close operations, it's the Lparen of the function call. -// -// TODO(adonovan): handle implicit receive operations from 'for...range chan' statements. -func findOp(qpos *queryPos) token.Pos { - for _, n := range qpos.path { - switch n := n.(type) { - case *ast.UnaryExpr: - if n.Op == token.ARROW { - return n.OpPos - } - case *ast.SendStmt: - return n.Arrow - case *ast.CallExpr: - // close function call can only exist as a direct identifier - if close, ok := unparen(n.Fun).(*ast.Ident); ok { - if b, ok := qpos.info.Info.Uses[close].(*types.Builtin); ok && b.Name() == "close" { - return n.Lparen - } - } - } - } - return token.NoPos -} - -// chanOp abstracts an ssa.Send, ssa.Unop(ARROW), or a SelectState. -type chanOp struct { - ch ssa.Value - dir types.ChanDir // SendOnly=send, RecvOnly=recv, SendRecv=close - pos token.Pos -} - -// chanOps returns a slice of all the channel operations in the instruction. -func chanOps(instr ssa.Instruction) []chanOp { - // TODO(adonovan): handle calls to reflect.{Select,Recv,Send,Close} too. - var ops []chanOp - switch instr := instr.(type) { - case *ssa.UnOp: - if instr.Op == token.ARROW { - ops = append(ops, chanOp{instr.X, types.RecvOnly, instr.Pos()}) - } - case *ssa.Send: - ops = append(ops, chanOp{instr.Chan, types.SendOnly, instr.Pos()}) - case *ssa.Select: - for _, st := range instr.States { - ops = append(ops, chanOp{st.Chan, st.Dir, st.Pos}) - } - case ssa.CallInstruction: - cc := instr.Common() - if b, ok := cc.Value.(*ssa.Builtin); ok && b.Name() == "close" { - ops = append(ops, chanOp{cc.Args[0], types.SendRecv, cc.Pos()}) - } - } - return ops -} - -// TODO(adonovan): show the line of text for each pos, like "referrers" does. 
-type peersResult struct { - queryPos token.Pos // of queried channel op - queryType types.Type // type of queried channel - makes, sends, receives, closes []token.Pos // positions of aliased makechan/send/receive/close instrs -} - -func (r *peersResult) PrintPlain(printf printfFunc) { - if len(r.makes) == 0 { - printf(r.queryPos, "This channel can't point to anything.") - return - } - printf(r.queryPos, "This channel of type %s may be:", r.queryType) - for _, alloc := range r.makes { - printf(alloc, "\tallocated here") - } - for _, send := range r.sends { - printf(send, "\tsent to, here") - } - for _, receive := range r.receives { - printf(receive, "\treceived from, here") - } - for _, clos := range r.closes { - printf(clos, "\tclosed, here") - } -} - -func (r *peersResult) JSON(fset *token.FileSet) []byte { - peers := &serial.Peers{ - Pos: fset.Position(r.queryPos).String(), - Type: r.queryType.String(), - } - for _, alloc := range r.makes { - peers.Allocs = append(peers.Allocs, fset.Position(alloc).String()) - } - for _, send := range r.sends { - peers.Sends = append(peers.Sends, fset.Position(send).String()) - } - for _, receive := range r.receives { - peers.Receives = append(peers.Receives, fset.Position(receive).String()) - } - for _, clos := range r.closes { - peers.Closes = append(peers.Closes, fset.Position(clos).String()) - } - return toJSON(peers) -} - -// -------- utils -------- - -// NB: byPos is not deterministic across packages since it depends on load order. -// Use lessPos if the tests need it. -type byPos []token.Pos - -func (p byPos) Len() int { return len(p) } -func (p byPos) Less(i, j int) bool { return p[i] < p[j] } -func (p byPos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/cmd/guru/pointsto.go b/cmd/guru/pointsto.go deleted file mode 100644 index e7608442c1b..00000000000 --- a/cmd/guru/pointsto.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "sort" - - "golang.org/x/tools/cmd/guru/serial" - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/loader" - "golang.org/x/tools/go/pointer" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/ssa/ssautil" -) - -// pointsto runs the pointer analysis on the selected expression, -// and reports its points-to set (for a pointer-like expression) -// or its dynamic types (for an interface, reflect.Value, or -// reflect.Type expression) and their points-to sets. -// -// All printed sets are sorted to ensure determinism. -func pointsto(q *Query) error { - lconf := loader.Config{Build: q.Build} - - if err := setPTAScope(&lconf, q.Scope); err != nil { - return err - } - - // Load/parse/type-check the program. 
- lprog, err := loadWithSoftErrors(&lconf) - if err != nil { - return err - } - - qpos, err := parseQueryPos(lprog, q.Pos, true) // needs exact pos - if err != nil { - return err - } - - prog := ssautil.CreateProgram(lprog, ssa.GlobalDebug) - - ptaConfig, err := setupPTA(prog, lprog, q.PTALog, q.Reflection) - if err != nil { - return err - } - - path, action := findInterestingNode(qpos.info, qpos.path) - if action != actionExpr { - return fmt.Errorf("pointer analysis wants an expression; got %s", - astutil.NodeDescription(qpos.path[0])) - } - - var expr ast.Expr - var obj types.Object - switch n := path[0].(type) { - case *ast.ValueSpec: - // ambiguous ValueSpec containing multiple names - return fmt.Errorf("multiple value specification") - case *ast.Ident: - obj = qpos.info.ObjectOf(n) - expr = n - case ast.Expr: - expr = n - default: - // TODO(adonovan): is this reachable? - return fmt.Errorf("unexpected AST for expr: %T", n) - } - - // Reject non-pointerlike types (includes all constants---except nil). - // TODO(adonovan): reject nil too. - typ := qpos.info.TypeOf(expr) - if !pointer.CanPoint(typ) { - return fmt.Errorf("pointer analysis wants an expression of reference type; got %s", typ) - } - - // Determine the ssa.Value for the expression. - var value ssa.Value - var isAddr bool - if obj != nil { - // def/ref of func/var object - value, isAddr, err = ssaValueForIdent(prog, qpos.info, obj, path) - } else { - value, isAddr, err = ssaValueForExpr(prog, qpos.info, path) - } - if err != nil { - return err // e.g. trivially dead code - } - - // Defer SSA construction till after errors are reported. - prog.Build() - - // Run the pointer analysis. - ptrs, err := runPTA(ptaConfig, value, isAddr) - if err != nil { - return err // e.g. analytically unreachable - } - - q.Output(lprog.Fset, &pointstoResult{ - qpos: qpos, - typ: typ, - ptrs: ptrs, - }) - return nil -} - -// ssaValueForIdent returns the ssa.Value for the ast.Ident whose path -// to the root of the AST is path. isAddr reports whether the -// ssa.Value is the address denoted by the ast.Ident, not its value. -func ssaValueForIdent(prog *ssa.Program, qinfo *loader.PackageInfo, obj types.Object, path []ast.Node) (value ssa.Value, isAddr bool, err error) { - switch obj := obj.(type) { - case *types.Var: - pkg := prog.Package(qinfo.Pkg) - pkg.Build() - if v, addr := prog.VarValue(obj, pkg, path); v != nil { - return v, addr, nil - } - return nil, false, fmt.Errorf("can't locate SSA Value for var %s", obj.Name()) - - case *types.Func: - fn := prog.FuncValue(obj) - if fn == nil { - return nil, false, fmt.Errorf("%s is an interface method", obj) - } - // TODO(adonovan): there's no point running PTA on a *Func ident. - // Eliminate this feature. - return fn, false, nil - } - panic(obj) -} - -// ssaValueForExpr returns the ssa.Value of the non-ast.Ident -// expression whose path to the root of the AST is path. -func ssaValueForExpr(prog *ssa.Program, qinfo *loader.PackageInfo, path []ast.Node) (value ssa.Value, isAddr bool, err error) { - pkg := prog.Package(qinfo.Pkg) - pkg.SetDebugMode(true) - pkg.Build() - - fn := ssa.EnclosingFunction(pkg, path) - if fn == nil { - return nil, false, fmt.Errorf("no SSA function built for this location (dead code?)") - } - - if v, addr := fn.ValueForExpr(path[0].(ast.Expr)); v != nil { - return v, addr, nil - } - - return nil, false, fmt.Errorf("can't locate SSA Value for expression in %s", fn) -} - -// runPTA runs the pointer analysis of the selected SSA value or address. 
-func runPTA(conf *pointer.Config, v ssa.Value, isAddr bool) (ptrs []pointerResult, err error) { - T := v.Type() - if isAddr { - conf.AddIndirectQuery(v) - T = deref(T) - } else { - conf.AddQuery(v) - } - ptares := ptrAnalysis(conf) - - var ptr pointer.Pointer - if isAddr { - ptr = ptares.IndirectQueries[v] - } else { - ptr = ptares.Queries[v] - } - if ptr == (pointer.Pointer{}) { - return nil, fmt.Errorf("pointer analysis did not find expression (dead code?)") - } - pts := ptr.PointsTo() - - if pointer.CanHaveDynamicTypes(T) { - // Show concrete types for interface/reflect.Value expression. - if concs := pts.DynamicTypes(); concs.Len() > 0 { - concs.Iterate(func(conc types.Type, pta interface{}) { - labels := pta.(pointer.PointsToSet).Labels() - sort.Sort(byPosAndString(labels)) // to ensure determinism - ptrs = append(ptrs, pointerResult{conc, labels}) - }) - } - } else { - // Show labels for other expressions. - labels := pts.Labels() - sort.Sort(byPosAndString(labels)) // to ensure determinism - ptrs = append(ptrs, pointerResult{T, labels}) - } - sort.Sort(byTypeString(ptrs)) // to ensure determinism - return ptrs, nil -} - -type pointerResult struct { - typ types.Type // type of the pointer (always concrete) - labels []*pointer.Label // set of labels -} - -type pointstoResult struct { - qpos *queryPos - typ types.Type // type of expression - ptrs []pointerResult // pointer info (typ is concrete => len==1) -} - -func (r *pointstoResult) PrintPlain(printf printfFunc) { - if pointer.CanHaveDynamicTypes(r.typ) { - // Show concrete types for interface, reflect.Type or - // reflect.Value expression. - - if len(r.ptrs) > 0 { - printf(r.qpos, "this %s may contain these dynamic types:", r.qpos.typeString(r.typ)) - for _, ptr := range r.ptrs { - var obj types.Object - if nt, ok := deref(ptr.typ).(*types.Named); ok { - obj = nt.Obj() - } - if len(ptr.labels) > 0 { - printf(obj, "\t%s, may point to:", r.qpos.typeString(ptr.typ)) - printLabels(printf, ptr.labels, "\t\t") - } else { - printf(obj, "\t%s", r.qpos.typeString(ptr.typ)) - } - } - } else { - printf(r.qpos, "this %s cannot contain any dynamic types.", r.typ) - } - } else { - // Show labels for other expressions. 
- if ptr := r.ptrs[0]; len(ptr.labels) > 0 { - printf(r.qpos, "this %s may point to these objects:", - r.qpos.typeString(r.typ)) - printLabels(printf, ptr.labels, "\t") - } else { - printf(r.qpos, "this %s may not point to anything.", - r.qpos.typeString(r.typ)) - } - } -} - -func (r *pointstoResult) JSON(fset *token.FileSet) []byte { - var pts []serial.PointsTo - for _, ptr := range r.ptrs { - var namePos string - if nt, ok := deref(ptr.typ).(*types.Named); ok { - namePos = fset.Position(nt.Obj().Pos()).String() - } - var labels []serial.PointsToLabel - for _, l := range ptr.labels { - labels = append(labels, serial.PointsToLabel{ - Pos: fset.Position(l.Pos()).String(), - Desc: l.String(), - }) - } - pts = append(pts, serial.PointsTo{ - Type: r.qpos.typeString(ptr.typ), - NamePos: namePos, - Labels: labels, - }) - } - return toJSON(pts) -} - -type byTypeString []pointerResult - -func (a byTypeString) Len() int { return len(a) } -func (a byTypeString) Less(i, j int) bool { return a[i].typ.String() < a[j].typ.String() } -func (a byTypeString) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type byPosAndString []*pointer.Label - -func (a byPosAndString) Len() int { return len(a) } -func (a byPosAndString) Less(i, j int) bool { - cmp := a[i].Pos() - a[j].Pos() - return cmp < 0 || (cmp == 0 && a[i].String() < a[j].String()) -} -func (a byPosAndString) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func printLabels(printf printfFunc, labels []*pointer.Label, prefix string) { - // TODO(adonovan): due to context-sensitivity, many of these - // labels may differ only by context, which isn't apparent. - for _, label := range labels { - printf(label, "%s%s", prefix, label) - } -} diff --git a/cmd/guru/serial/serial.go b/cmd/guru/serial/serial.go index 082e6cf0d7f..3af7f4731f7 100644 --- a/cmd/guru/serial/serial.go +++ b/cmd/guru/serial/serial.go @@ -10,18 +10,12 @@ // // Query Result stream // ----- ------------- -// callees Callees -// callers Caller ... -// callstack CallStack // definition Definition // describe Describe // freevars FreeVar ... // implements Implements -// peers Peers -// pointsto PointsTo ... // referrers ReferrersInitial ReferrersPackage ... // what What -// whicherrs WhichErrs // // All 'pos' strings in the output are of the form "file:line:col", // where line is the 1-based line number and col is the 1-based byte index. diff --git a/cmd/guru/testdata/src/calls-json/main.go b/cmd/guru/testdata/src/calls-json/main.go deleted file mode 100644 index 9d58ed1efd5..00000000000 --- a/cmd/guru/testdata/src/calls-json/main.go +++ /dev/null @@ -1,16 +0,0 @@ -package main - -// Tests of call-graph queries, -format=json. -// See go.tools/guru/guru_test.go for explanation. -// See calls-json.golden for expected query results. 
- -func call(f func()) { - f() // @callees @callees-f "f" -} - -func main() { - call(func() { - // @callers callers-main.anon "^" - // @callstack callstack-main.anon "^" - }) -} diff --git a/cmd/guru/testdata/src/calls-json/main.golden b/cmd/guru/testdata/src/calls-json/main.golden deleted file mode 100644 index 27dc50908da..00000000000 --- a/cmd/guru/testdata/src/calls-json/main.golden +++ /dev/null @@ -1,28 +0,0 @@ --------- @callees @callees-f -------- -{ - "pos": "testdata/src/calls-json/main.go:8:3", - "desc": "dynamic function call", - "callees": [ - { - "name": "calls-json.main$1", - "pos": "testdata/src/calls-json/main.go:12:7" - } - ] -} --------- @callstack callstack-main.anon -------- -{ - "pos": "testdata/src/calls-json/main.go:12:7", - "target": "calls-json.main$1", - "callers": [ - { - "pos": "testdata/src/calls-json/main.go:8:3", - "desc": "dynamic function call", - "caller": "calls-json.call" - }, - { - "pos": "testdata/src/calls-json/main.go:12:6", - "desc": "static function call", - "caller": "calls-json.main" - } - ] -} diff --git a/cmd/guru/testdata/src/calls/main.go b/cmd/guru/testdata/src/calls/main.go deleted file mode 100644 index a2089140201..00000000000 --- a/cmd/guru/testdata/src/calls/main.go +++ /dev/null @@ -1,129 +0,0 @@ -package main - -import ( - "fmt" -) - -// Tests of call-graph queries. -// See go.tools/guru/guru_test.go for explanation. -// See calls.golden for expected query results. - -func A(x *int) { // @pointsto pointsto-A-x "x" - // @callers callers-A "^" - // @callstack callstack-A "^" -} - -func B(x *int) { // @pointsto pointsto-B-x "x" - // @callers callers-B "^" -} - -func foo() { -} - -// apply is not (yet) treated context-sensitively. -func apply(f func(x *int), x *int) { - f(x) // @callees callees-apply "f" - // @callers callers-apply "^" -} - -// store *is* treated context-sensitively, -// so the points-to sets for pc, pd are precise. -func store(ptr **int, value *int) { - *ptr = value - // @callers callers-store "^" -} - -func call(f func() *int) { - // Result points to anon function. - f() // @pointsto pointsto-result-f "f" - - // Target of call is anon function. - f() // @callees callees-main.call-f "f" - - // @callers callers-main.call "^" -} - -func main() { - var a, b int - go apply(A, &a) // @callees callees-main-apply1 "app" - defer apply(B, &b) - - var c, d int - var pc, pd *int // @pointsto pointsto-pc "pc" - store(&pc, &c) - store(&pd, &d) - _ = pd // @pointsto pointsto-pd "pd" - - call(func() *int { - // We are called twice from main.call - // @callers callers-main.anon "^" - return &a - }) - - // Errors - _ = "no function call here" // @callees callees-err-no-call "no" - print("builtin") // @callees callees-err-builtin "builtin" - _ = string("type conversion") // @callees callees-err-conversion "str" - call(nil) // @callees callees-err-bad-selection "call\\(nil" - if false { - main() // @callees callees-err-deadcode1 "main" - } - var nilFunc func() - nilFunc() // @callees callees-err-nil-func "nilFunc" - var i interface { - f() - } - i.f() // @callees callees-err-nil-interface "i.f" - - i = new(myint) - i.f() // @callees callees-not-a-wrapper "f" - - // statically dispatched calls. Handled specially by callees, so test that they work. 
- foo() // @callees callees-static-call "foo" - fmt.Println() // @callees callees-qualified-call "Println" - m := new(method) - m.f() // @callees callees-static-method-call "f" - g := new(embeddedIface) - g.iface = m - g.f() // @callees callees-implicit-selection-method-call "f" -} - -type myint int - -func (myint) f() { - // @callers callers-not-a-wrapper "^" -} - -type method int - -func (method) f() { -} - -type embeddedIface struct { - iface -} - -type iface interface { - f() -} - -var dynamic = func() {} - -func deadcode() { - main() // @callees callees-err-deadcode2 "main" - // @callers callers-err-deadcode "^" - // @callstack callstack-err-deadcode "^" - - // Within dead code, dynamic calls have no callees. - dynamic() // @callees callees-err-deadcode3 "dynamic" -} - -// This code belongs to init. -var global = 123 // @callers callers-global "global" - -// The package initializer may be called by other packages' inits, or -// in this case, the root of the callgraph. The source-level init functions -// are in turn called by it. -func init() { - // @callstack callstack-init "^" -} diff --git a/cmd/guru/testdata/src/calls/main.golden b/cmd/guru/testdata/src/calls/main.golden deleted file mode 100644 index ab68e95a630..00000000000 --- a/cmd/guru/testdata/src/calls/main.golden +++ /dev/null @@ -1,125 +0,0 @@ --------- @pointsto pointsto-A-x -------- -this *int may point to these objects: - a - b - --------- @callstack callstack-A -------- -Found a call path from root to calls.A -calls.A -dynamic function call from calls.apply -concurrent static function call from calls.main - --------- @pointsto pointsto-B-x -------- -this *int may point to these objects: - a - b - --------- @callers callers-B -------- -calls.B is called from these 1 sites: - dynamic function call from calls.apply - --------- @callees callees-apply -------- -this dynamic function call dispatches to: - calls.A - calls.B - --------- @callers callers-apply -------- -calls.apply is called from these 2 sites: - concurrent static function call from calls.main - deferred static function call from calls.main - --------- @callers callers-store -------- -calls.store is called from these 2 sites: - static function call from calls.main - static function call from calls.main - --------- @pointsto pointsto-result-f -------- -this func() *int may point to these objects: - calls.main$1 - --------- @callees callees-main.call-f -------- -this dynamic function call dispatches to: - calls.main$1 - --------- @callers callers-main.call -------- -calls.call is called from these 2 sites: - static function call from calls.main - static function call from calls.main - --------- @callees callees-main-apply1 -------- -this static function call dispatches to: - calls.apply - --------- @pointsto pointsto-pc -------- -this *int may point to these objects: - c - --------- @pointsto pointsto-pd -------- -this *int may point to these objects: - d - --------- @callees callees-err-no-call -------- - -Error: there is no function call here --------- @callees callees-err-builtin -------- - -Error: this is a call to the built-in 'print' operator --------- @callees callees-err-conversion -------- - -Error: this is a type conversion, not a function call --------- @callees callees-err-bad-selection -------- - -Error: ambiguous selection within function call (or conversion) --------- @callees callees-err-deadcode1 -------- -this static function call dispatches to: - calls.main - --------- @callees callees-err-nil-func -------- -dynamic function call on nil value - 
--------- @callees callees-err-nil-interface -------- -dynamic method call on nil value - --------- @callees callees-not-a-wrapper -------- -this dynamic method call dispatches to: - (calls.myint).f - --------- @callees callees-static-call -------- -this static function call dispatches to: - calls.foo - --------- @callees callees-qualified-call -------- -this static function call dispatches to: - fmt.Println - --------- @callees callees-static-method-call -------- -this static function call dispatches to: - (calls.method).f - --------- @callees callees-implicit-selection-method-call -------- -this dynamic method call dispatches to: - (calls.method).f - --------- @callers callers-not-a-wrapper -------- -(calls.myint).f is called from these 1 sites: - dynamic method call from calls.main - --------- @callees callees-err-deadcode2 -------- -this static function call dispatches to: - calls.main - --------- @callstack callstack-err-deadcode -------- -calls.deadcode is unreachable in this analysis scope - --------- @callees callees-err-deadcode3 -------- - -Error: this call site is unreachable in this analysis --------- @callers callers-global -------- -calls.init is called from these 1 sites: -the root of the call graph - --------- @callstack callstack-init -------- -Found a call path from root to calls.init#1 -calls.init#1 -static function call from calls.init - diff --git a/cmd/guru/testdata/src/imports/main.go b/cmd/guru/testdata/src/imports/main.go index 9fe2b711f8d..0fc40f22b4f 100644 --- a/cmd/guru/testdata/src/imports/main.go +++ b/cmd/guru/testdata/src/imports/main.go @@ -21,7 +21,7 @@ func main() { var t lib.Type // @describe ref-type "Type" p := t.Method(&a) // @describe ref-method "Method" - print(*p + 1) // @pointsto p "p " + print(*p + 1) var _ lib.Type // @describe ref-pkg "lib" diff --git a/cmd/guru/testdata/src/imports/main.golden b/cmd/guru/testdata/src/imports/main.golden index 1e1221789ea..18a3e22c8cc 100644 --- a/cmd/guru/testdata/src/imports/main.golden +++ b/cmd/guru/testdata/src/imports/main.golden @@ -37,10 +37,6 @@ Methods: reference to method func (lib.Type).Method(x *int) *int defined here --------- @pointsto p -------- -this *int may point to these objects: - imports.a - -------- @describe ref-pkg -------- reference to package "lib" const Const untyped int = 3 diff --git a/cmd/guru/testdata/src/peers-json/main.go b/cmd/guru/testdata/src/peers-json/main.go deleted file mode 100644 index ef63992b25e..00000000000 --- a/cmd/guru/testdata/src/peers-json/main.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -// Tests of channel 'peers' query, -format=json. -// See go.tools/guru/guru_test.go for explanation. -// See peers-json.golden for expected query results. 
- -func main() { - chA := make(chan *int) - <-chA - select { - case <-chA: // @peers peer-recv-chA "<-" - } -} diff --git a/cmd/guru/testdata/src/peers-json/main.golden b/cmd/guru/testdata/src/peers-json/main.golden deleted file mode 100644 index 50d571604c8..00000000000 --- a/cmd/guru/testdata/src/peers-json/main.golden +++ /dev/null @@ -1,12 +0,0 @@ --------- @peers peer-recv-chA -------- -{ - "pos": "testdata/src/peers-json/main.go:11:7", - "type": "chan *int", - "allocs": [ - "testdata/src/peers-json/main.go:8:13" - ], - "receives": [ - "testdata/src/peers-json/main.go:9:2", - "testdata/src/peers-json/main.go:11:7" - ] -} diff --git a/cmd/guru/testdata/src/peers/main.go b/cmd/guru/testdata/src/peers/main.go deleted file mode 100644 index 40ee205b277..00000000000 --- a/cmd/guru/testdata/src/peers/main.go +++ /dev/null @@ -1,52 +0,0 @@ -package main - -// Tests of channel 'peers' query. -// See go.tools/guru/guru_test.go for explanation. -// See peers.golden for expected query results. - -var a2 int - -func main() { - chA := make(chan *int) - a1 := 1 - chA <- &a1 - - chA2 := make(chan *int, 2) - if a2 == 0 { - chA = chA2 - } - - chB := make(chan *int) - b := 3 - chB <- &b - - <-chA // @pointsto pointsto-chA "chA" - <-chA2 // @pointsto pointsto-chA2 "chA2" - <-chB // @pointsto pointsto-chB "chB" - - select { - case rA := <-chA: // @peers peer-recv-chA "<-" - _ = rA // @pointsto pointsto-rA "rA" - case rB := <-chB: // @peers peer-recv-chB "<-" - _ = rB // @pointsto pointsto-rB "rB" - - case <-chA: // @peers peer-recv-chA' "<-" - - case chA2 <- &a2: // @peers peer-send-chA' "<-" - } - - for range chA { - } - - close(chA) // @peers peer-close-chA "chA" - - chC := make(chan *int) - (close)(chC) // @peers peer-close-chC "chC" - - close := func(ch chan *int) chan *int { - return ch - } - - close(chC) <- &b // @peers peer-send-chC "chC" - <-close(chC) // @peers peer-recv-chC "chC" -} diff --git a/cmd/guru/testdata/src/peers/main.golden b/cmd/guru/testdata/src/peers/main.golden deleted file mode 100644 index f97e672953e..00000000000 --- a/cmd/guru/testdata/src/peers/main.golden +++ /dev/null @@ -1,100 +0,0 @@ --------- @pointsto pointsto-chA -------- -this chan *int may point to these objects: - makechan - makechan - --------- @pointsto pointsto-chA2 -------- -this chan *int may point to these objects: - makechan - --------- @pointsto pointsto-chB -------- -this chan *int may point to these objects: - makechan - --------- @peers peer-recv-chA -------- -This channel of type chan *int may be: - allocated here - allocated here - sent to, here - sent to, here - received from, here - received from, here - received from, here - received from, here - received from, here - closed, here - --------- @pointsto pointsto-rA -------- -this *int may point to these objects: - peers.a2 - a1 - --------- @peers peer-recv-chB -------- -This channel of type chan *int may be: - allocated here - sent to, here - received from, here - received from, here - --------- @pointsto pointsto-rB -------- -this *int may point to these objects: - b - --------- @peers peer-recv-chA' -------- -This channel of type chan *int may be: - allocated here - allocated here - sent to, here - sent to, here - received from, here - received from, here - received from, here - received from, here - received from, here - closed, here - --------- @peers peer-send-chA' -------- -This channel of type chan *int may be: - allocated here - sent to, here - received from, here - received from, here - received from, here - received from, here - received 
from, here - closed, here - --------- @peers peer-close-chA -------- -This channel of type chan *int may be: - allocated here - allocated here - sent to, here - sent to, here - received from, here - received from, here - received from, here - received from, here - received from, here - closed, here - --------- @peers peer-close-chC -------- -This channel of type chan *int may be: - allocated here - sent to, here - received from, here - closed, here - --------- @peers peer-send-chC -------- -This channel of type chan *int may be: - allocated here - sent to, here - received from, here - closed, here - --------- @peers peer-recv-chC -------- -This channel of type chan *int may be: - allocated here - sent to, here - received from, here - closed, here - diff --git a/cmd/guru/testdata/src/pointsto-json/main.go b/cmd/guru/testdata/src/pointsto-json/main.go deleted file mode 100644 index 0a9f3186680..00000000000 --- a/cmd/guru/testdata/src/pointsto-json/main.go +++ /dev/null @@ -1,27 +0,0 @@ -package main - -// Tests of 'pointsto' queries, -format=json. -// See go.tools/guru/guru_test.go for explanation. -// See pointsto-json.golden for expected query results. - -func main() { // - var s struct{ x [3]int } - p := &s.x[0] // @pointsto val-p "p" - _ = p - - var i I = C(0) - if i == nil { - i = new(D) - } - print(i) // @pointsto val-i "\\bi\\b" -} - -type I interface { - f() -} - -type C int -type D struct{} - -func (c C) f() {} -func (d *D) f() {} diff --git a/cmd/guru/testdata/src/pointsto-json/main.golden b/cmd/guru/testdata/src/pointsto-json/main.golden deleted file mode 100644 index 06a2204a8cd..00000000000 --- a/cmd/guru/testdata/src/pointsto-json/main.golden +++ /dev/null @@ -1,29 +0,0 @@ --------- @pointsto val-p -------- -[ - { - "type": "*int", - "labels": [ - { - "pos": "testdata/src/pointsto-json/main.go:8:6", - "desc": "s.x[*]" - } - ] - } -] --------- @pointsto val-i -------- -[ - { - "type": "*D", - "namepos": "testdata/src/pointsto-json/main.go:24:6", - "labels": [ - { - "pos": "testdata/src/pointsto-json/main.go:14:10", - "desc": "new" - } - ] - }, - { - "type": "C", - "namepos": "testdata/src/pointsto-json/main.go:23:6" - } -] diff --git a/cmd/guru/testdata/src/pointsto/main.go b/cmd/guru/testdata/src/pointsto/main.go deleted file mode 100644 index c4ba2e258f4..00000000000 --- a/cmd/guru/testdata/src/pointsto/main.go +++ /dev/null @@ -1,75 +0,0 @@ -package main - -// Tests of 'pointsto' query. -// See go.tools/guru/guru_test.go for explanation. -// See pointsto.golden for expected query results. - -const pi = 3.141 // @pointsto const "pi" - -var global = new(string) // NB: ssa.Global is indirect, i.e. **string - -func main() { - livecode() - - // func objects - _ = main // @pointsto func-ref-main "main" - _ = (*C).f // @pointsto func-ref-*C.f "..C..f" - _ = D.f // @pointsto func-ref-D.f "D.f" - _ = I.f // @pointsto func-ref-I.f "I.f" - var d D - var i I - _ = d.f // @pointsto func-ref-d.f "d.f" - _ = i.f // @pointsto func-ref-i.f "i.f" - - // var objects - anon := func() { - _ = d.f // @pointsto ref-lexical-d.f "d.f" - } - _ = anon // @pointsto ref-anon "anon" - _ = global // @pointsto ref-global "global" - - // SSA affords some local flow sensitivity. 
- var a, b int - var x = &a // @pointsto var-def-x-1 "x" - _ = x // @pointsto var-ref-x-1 "x" - x = &b // @pointsto var-def-x-2 "x" - _ = x // @pointsto var-ref-x-2 "x" - - i = new(C) // @pointsto var-ref-i-C "i" - if i != nil { - i = D{} // @pointsto var-ref-i-D "i" - } - print(i) // @pointsto var-ref-i "\\bi\\b" - - m := map[string]*int{"a": &a} - mapval, _ := m["a"] // @pointsto map-lookup,ok "m..a.." - _ = mapval // @pointsto mapval "mapval" - _ = m // @pointsto m "m" - - if false { - panic(3) // @pointsto builtin-panic "panic" - } - - // NB: s.f is addressable per (*ssa.Program).VarValue, - // but our query concerns the object, not its address. - s := struct{ f interface{} }{f: make(chan bool)} - print(s.f) // @pointsto var-ref-s-f "s.f" -} - -func livecode() {} // @pointsto func-live "livecode" - -func deadcode() { // @pointsto func-dead "deadcode" - // Pointer analysis can't run on dead code. - var b = new(int) // @pointsto b "b" - _ = b -} - -type I interface { - f() -} - -type C int -type D struct{} - -func (c *C) f() {} -func (d D) f() {} diff --git a/cmd/guru/testdata/src/pointsto/main.golden b/cmd/guru/testdata/src/pointsto/main.golden deleted file mode 100644 index 40a830f0827..00000000000 --- a/cmd/guru/testdata/src/pointsto/main.golden +++ /dev/null @@ -1,96 +0,0 @@ --------- @pointsto const -------- - -Error: pointer analysis wants an expression of reference type; got untyped float --------- @pointsto func-ref-main -------- -this func() may point to these objects: - pointsto.main - --------- @pointsto func-ref-*C.f -------- -this func() may point to these objects: - (*pointsto.C).f - --------- @pointsto func-ref-D.f -------- -this func() may point to these objects: - (pointsto.D).f - --------- @pointsto func-ref-I.f -------- - -Error: func (pointsto.I).f() is an interface method --------- @pointsto func-ref-d.f -------- -this func() may point to these objects: - (pointsto.D).f - --------- @pointsto func-ref-i.f -------- - -Error: func (pointsto.I).f() is an interface method --------- @pointsto ref-lexical-d.f -------- -this func() may point to these objects: - (pointsto.D).f - --------- @pointsto ref-anon -------- -this func() may point to these objects: - pointsto.main$1 - --------- @pointsto ref-global -------- -this *string may point to these objects: - new - --------- @pointsto var-def-x-1 -------- -this *int may point to these objects: - a - --------- @pointsto var-ref-x-1 -------- -this *int may point to these objects: - a - --------- @pointsto var-def-x-2 -------- -this *int may point to these objects: - b - --------- @pointsto var-ref-x-2 -------- -this *int may point to these objects: - b - --------- @pointsto var-ref-i-C -------- -this I may contain these dynamic types: - *C, may point to: - new - --------- @pointsto var-ref-i-D -------- -this I may contain these dynamic types: - D - --------- @pointsto var-ref-i -------- -this I may contain these dynamic types: - *C, may point to: - new - D - --------- @pointsto map-lookup,ok -------- - -Error: pointer analysis wants an expression of reference type; got (*int, bool) --------- @pointsto mapval -------- -this *int may point to these objects: - a - --------- @pointsto m -------- -this map[string]*int may point to these objects: - makemap - --------- @pointsto builtin-panic -------- - -Error: pointer analysis wants an expression of reference type; got () --------- @pointsto var-ref-s-f -------- -this any may contain these dynamic types: - chan bool, may point to: - makechan - --------- @pointsto func-live -------- - 
-Error: pointer analysis did not find expression (dead code?) --------- @pointsto func-dead -------- - -Error: pointer analysis did not find expression (dead code?) --------- @pointsto b -------- - -Error: pointer analysis did not find expression (dead code?) diff --git a/cmd/guru/testdata/src/reflection/main.go b/cmd/guru/testdata/src/reflection/main.go deleted file mode 100644 index 392643baa8c..00000000000 --- a/cmd/guru/testdata/src/reflection/main.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -// This is a test of 'pointsto', but we split it into a separate file -// so that pointsto.go doesn't have to import "reflect" each time. - -import "reflect" - -var a int -var b bool - -func main() { - m := make(map[*int]*bool) - m[&a] = &b - - mrv := reflect.ValueOf(m) - if a > 0 { - mrv = reflect.ValueOf(&b) - } - if a > 0 { - mrv = reflect.ValueOf(&a) - } - - _ = mrv // @pointsto mrv "mrv" - p1 := mrv.Interface() // @pointsto p1 "p1" - p2 := mrv.MapKeys() // @pointsto p2 "p2" - p3 := p2[0] // @pointsto p3 "p3" - p4 := reflect.TypeOf(p1) // @pointsto p4 "p4" - - _, _, _, _ = p1, p2, p3, p4 -} diff --git a/cmd/guru/testdata/src/reflection/main.golden b/cmd/guru/testdata/src/reflection/main.golden deleted file mode 100644 index 2a84071a324..00000000000 --- a/cmd/guru/testdata/src/reflection/main.golden +++ /dev/null @@ -1,34 +0,0 @@ --------- @pointsto mrv -------- -this reflect.Value may contain these dynamic types: - *bool, may point to: - reflection.b - *int, may point to: - reflection.a - map[*int]*bool, may point to: - makemap - --------- @pointsto p1 -------- -this any may contain these dynamic types: - *bool, may point to: - reflection.b - *int, may point to: - reflection.a - map[*int]*bool, may point to: - makemap - --------- @pointsto p2 -------- -this []reflect.Value may point to these objects: - - --------- @pointsto p3 -------- -this reflect.Value may contain these dynamic types: - *int, may point to: - reflection.a - --------- @pointsto p4 -------- -this reflect.Type may contain these dynamic types: - *reflect.rtype, may point to: - *bool - *int - map[*int]*bool - diff --git a/cmd/guru/testdata/src/softerrs/main.go b/cmd/guru/testdata/src/softerrs/main.go deleted file mode 100644 index f7254b83891..00000000000 --- a/cmd/guru/testdata/src/softerrs/main.go +++ /dev/null @@ -1,15 +0,0 @@ -package main - -// Tests of various queries on a program containing only "soft" errors. -// See go.tools/guru/guru_test.go for explanation. -// See main.golden for expected query results. 
- -func _() { - var i int // "unused var" is a soft error -} - -func f() {} // @callers softerrs-callers-f "f" - -func main() { - f() // @describe softerrs-describe-f "f" -} diff --git a/cmd/guru/testdata/src/softerrs/main.golden b/cmd/guru/testdata/src/softerrs/main.golden deleted file mode 100644 index ae95f46dc6c..00000000000 --- a/cmd/guru/testdata/src/softerrs/main.golden +++ /dev/null @@ -1,8 +0,0 @@ --------- @callers softerrs-callers-f -------- -softerrs.f is called from these 1 sites: - static function call from softerrs.main - --------- @describe softerrs-describe-f -------- -reference to func f() -defined here - diff --git a/cmd/guru/testdata/src/what-json/main.golden b/cmd/guru/testdata/src/what-json/main.golden index 320c52bd45b..760f9d7e8ee 100644 --- a/cmd/guru/testdata/src/what-json/main.golden +++ b/cmd/guru/testdata/src/what-json/main.golden @@ -33,16 +33,11 @@ } ], "modes": [ - "callees", - "callers", - "callstack", "definition", "describe", "freevars", "implements", - "pointsto", - "referrers", - "whicherrs" + "referrers" ], "srcdir": "testdata/src", "importpath": "what-json" @@ -81,9 +76,7 @@ "describe", "freevars", "implements", - "pointsto", - "referrers", - "whicherrs" + "referrers" ], "srcdir": "testdata/src", "importpath": "what-json", diff --git a/cmd/guru/testdata/src/what/main.golden b/cmd/guru/testdata/src/what/main.golden index f113e2f85f7..dbd1cc2afe2 100644 --- a/cmd/guru/testdata/src/what/main.golden +++ b/cmd/guru/testdata/src/what/main.golden @@ -1,7 +1,7 @@ -------- @what pkgdecl -------- identifier source file -modes: [definition describe freevars implements pointsto referrers whicherrs] +modes: [definition describe freevars implements referrers] srcdir: testdata/src import path: what @@ -12,7 +12,7 @@ expression statement block function declaration source file -modes: [callees callers callstack definition describe freevars implements pointsto referrers whicherrs] +modes: [definition describe freevars implements referrers] srcdir: testdata/src import path: what @@ -22,7 +22,7 @@ variable declaration statement block function declaration source file -modes: [callers callstack describe freevars pointsto whicherrs] +modes: [describe freevars] srcdir: testdata/src import path: what @@ -33,7 +33,7 @@ expression statement block function declaration source file -modes: [callers callstack definition describe freevars implements peers pointsto referrers whicherrs] +modes: [definition describe freevars implements referrers] srcdir: testdata/src import path: what ch diff --git a/cmd/guru/testdata/src/whicherrs/main.go b/cmd/guru/testdata/src/whicherrs/main.go deleted file mode 100644 index d1613c58396..00000000000 --- a/cmd/guru/testdata/src/whicherrs/main.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -type errType string - -const constErr errType = "blah" - -func (et errType) Error() string { - return string(et) -} - -var errVar error = errType("foo") - -func genErr(i int) error { - switch i { - case 0: - return constErr - case 1: - return errVar - default: - return nil - } -} - -func unreachable() { - err := errVar // @whicherrs func-dead "err" - _ = err -} - -func main() { - err := genErr(0) // @whicherrs localerrs "err" - _ = err -} diff --git a/cmd/guru/testdata/src/whicherrs/main.golden b/cmd/guru/testdata/src/whicherrs/main.golden deleted file mode 100644 index 3484752c51d..00000000000 --- a/cmd/guru/testdata/src/whicherrs/main.golden +++ /dev/null @@ -1,11 +0,0 @@ --------- @whicherrs func-dead -------- - -Error: pointer analysis did not find 
expression (dead code?) --------- @whicherrs localerrs -------- -this error may point to these globals: - errVar -this error may contain these constants: - constErr -this error may contain these dynamic types: - errType - diff --git a/cmd/guru/what.go b/cmd/guru/what.go index 7ebabbd8217..422c6c10950 100644 --- a/cmd/guru/what.go +++ b/cmd/guru/what.go @@ -43,22 +43,11 @@ func what(q *Query) error { } for _, n := range qpos.path { - switch n := n.(type) { + switch n.(type) { case *ast.Ident: enable["definition"] = true enable["referrers"] = true enable["implements"] = true - case *ast.CallExpr: - enable["callees"] = true - case *ast.FuncDecl: - enable["callers"] = true - enable["callstack"] = true - case *ast.SendStmt: - enable["peers"] = true - case *ast.UnaryExpr: - if n.Op == token.ARROW { - enable["peers"] = true - } } // For implements, we approximate findInterestingNode. @@ -73,37 +62,10 @@ func what(q *Query) error { enable["implements"] = true } } - - // For pointsto and whicherrs, we approximate findInterestingNode. - if _, ok := enable["pointsto"]; !ok { - switch n.(type) { - case ast.Stmt, - *ast.ArrayType, - *ast.StructType, - *ast.FuncType, - *ast.InterfaceType, - *ast.MapType, - *ast.ChanType: - // not an expression - enable["pointsto"] = false - enable["whicherrs"] = false - - case ast.Expr, ast.Decl, *ast.ValueSpec: - // an expression, maybe - enable["pointsto"] = true - enable["whicherrs"] = true - - default: - // Comment, Field, KeyValueExpr, etc: ascend. - } - } } // If we don't have an exact selection, disable modes that need one. if !qpos.exact { - enable["callees"] = false - enable["pointsto"] = false - enable["whicherrs"] = false enable["describe"] = false } diff --git a/cmd/guru/whicherrs.go b/cmd/guru/whicherrs.go deleted file mode 100644 index 3a81bf56a16..00000000000 --- a/cmd/guru/whicherrs.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "sort" - - "golang.org/x/tools/cmd/guru/serial" - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/loader" - "golang.org/x/tools/go/pointer" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/ssa/ssautil" -) - -var builtinErrorType = types.Universe.Lookup("error").Type() - -// whicherrs takes an position to an error and tries to find all types, constants -// and global value which a given error can point to and which can be checked from the -// scope where the error lives. -// In short, it returns a list of things that can be checked against in order to handle -// an error properly. -// -// TODO(dmorsing): figure out if fields in errors like *os.PathError.Err -// can be queried recursively somehow. -func whicherrs(q *Query) error { - lconf := loader.Config{Build: q.Build} - - if err := setPTAScope(&lconf, q.Scope); err != nil { - return err - } - - // Load/parse/type-check the program. 
- lprog, err := loadWithSoftErrors(&lconf) - if err != nil { - return err - } - - qpos, err := parseQueryPos(lprog, q.Pos, true) // needs exact pos - if err != nil { - return err - } - - prog := ssautil.CreateProgram(lprog, ssa.GlobalDebug) - - ptaConfig, err := setupPTA(prog, lprog, q.PTALog, q.Reflection) - if err != nil { - return err - } - - path, action := findInterestingNode(qpos.info, qpos.path) - if action != actionExpr { - return fmt.Errorf("whicherrs wants an expression; got %s", - astutil.NodeDescription(qpos.path[0])) - } - var expr ast.Expr - var obj types.Object - switch n := path[0].(type) { - case *ast.ValueSpec: - // ambiguous ValueSpec containing multiple names - return fmt.Errorf("multiple value specification") - case *ast.Ident: - obj = qpos.info.ObjectOf(n) - expr = n - case ast.Expr: - expr = n - default: - return fmt.Errorf("unexpected AST for expr: %T", n) - } - - typ := qpos.info.TypeOf(expr) - if !types.Identical(typ, builtinErrorType) { - return fmt.Errorf("selection is not an expression of type 'error'") - } - // Determine the ssa.Value for the expression. - var value ssa.Value - if obj != nil { - // def/ref of func/var object - value, _, err = ssaValueForIdent(prog, qpos.info, obj, path) - } else { - value, _, err = ssaValueForExpr(prog, qpos.info, path) - } - if err != nil { - return err // e.g. trivially dead code - } - - // Defer SSA construction till after errors are reported. - prog.Build() - - globals := findVisibleErrs(prog, qpos) - constants := findVisibleConsts(prog, qpos) - - res := &whicherrsResult{ - qpos: qpos, - errpos: expr.Pos(), - } - - // TODO(adonovan): the following code is heavily duplicated - // w.r.t. "pointsto". Refactor? - - // Find the instruction which initialized the - // global error. If more than one instruction has stored to the global - // remove the global from the set of values that we want to query. - allFuncs := ssautil.AllFunctions(prog) - for fn := range allFuncs { - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - store, ok := instr.(*ssa.Store) - if !ok { - continue - } - gval, ok := store.Addr.(*ssa.Global) - if !ok { - continue - } - gbl, ok := globals[gval] - if !ok { - continue - } - // we already found a store to this global - // The normal error define is just one store in the init - // so we just remove this global from the set we want to query - if gbl != nil { - delete(globals, gval) - } - globals[gval] = store.Val - } - } - } - - ptaConfig.AddQuery(value) - for _, v := range globals { - ptaConfig.AddQuery(v) - } - - ptares := ptrAnalysis(ptaConfig) - valueptr := ptares.Queries[value] - if valueptr == (pointer.Pointer{}) { - return fmt.Errorf("pointer analysis did not find expression (dead code?)") - } - for g, v := range globals { - ptr, ok := ptares.Queries[v] - if !ok { - continue - } - if !ptr.MayAlias(valueptr) { - continue - } - res.globals = append(res.globals, g) - } - pts := valueptr.PointsTo() - dedup := make(map[*ssa.NamedConst]bool) - for _, label := range pts.Labels() { - // These values are either MakeInterfaces or reflect - // generated interfaces. 
For the purposes of this - // analysis, we don't care about reflect generated ones - makeiface, ok := label.Value().(*ssa.MakeInterface) - if !ok { - continue - } - constval, ok := makeiface.X.(*ssa.Const) - if !ok { - continue - } - c := constants[*constval] - if c != nil && !dedup[c] { - dedup[c] = true - res.consts = append(res.consts, c) - } - } - concs := pts.DynamicTypes() - concs.Iterate(func(conc types.Type, _ interface{}) { - // go/types is a bit annoying here. - // We want to find all the types that we can - // typeswitch or assert to. This means finding out - // if the type pointed to can be seen by us. - // - // For the purposes of this analysis, we care only about - // TypeNames of Named or pointer-to-Named types. - // We ignore other types (e.g. structs) that implement error. - var name *types.TypeName - switch t := conc.(type) { - case *types.Pointer: - named, ok := t.Elem().(*types.Named) - if !ok { - return - } - name = named.Obj() - case *types.Named: - name = t.Obj() - default: - return - } - if !isAccessibleFrom(name, qpos.info.Pkg) { - return - } - res.types = append(res.types, &errorType{conc, name}) - }) - sort.Sort(membersByPosAndString(res.globals)) - sort.Sort(membersByPosAndString(res.consts)) - sort.Sort(sorterrorType(res.types)) - - q.Output(lprog.Fset, res) - return nil -} - -// findVisibleErrs returns a mapping from each package-level variable of type "error" to nil. -func findVisibleErrs(prog *ssa.Program, qpos *queryPos) map[*ssa.Global]ssa.Value { - globals := make(map[*ssa.Global]ssa.Value) - for _, pkg := range prog.AllPackages() { - for _, mem := range pkg.Members { - gbl, ok := mem.(*ssa.Global) - if !ok { - continue - } - gbltype := gbl.Type() - // globals are always pointers - if !types.Identical(deref(gbltype), builtinErrorType) { - continue - } - if !isAccessibleFrom(gbl.Object(), qpos.info.Pkg) { - continue - } - globals[gbl] = nil - } - } - return globals -} - -// findVisibleConsts returns a mapping from each package-level constant assignable to type "error", to nil. 
-func findVisibleConsts(prog *ssa.Program, qpos *queryPos) map[ssa.Const]*ssa.NamedConst { - constants := make(map[ssa.Const]*ssa.NamedConst) - for _, pkg := range prog.AllPackages() { - for _, mem := range pkg.Members { - obj, ok := mem.(*ssa.NamedConst) - if !ok { - continue - } - consttype := obj.Type() - if !types.AssignableTo(consttype, builtinErrorType) { - continue - } - if !isAccessibleFrom(obj.Object(), qpos.info.Pkg) { - continue - } - constants[*obj.Value] = obj - } - } - - return constants -} - -type membersByPosAndString []ssa.Member - -func (a membersByPosAndString) Len() int { return len(a) } -func (a membersByPosAndString) Less(i, j int) bool { - cmp := a[i].Pos() - a[j].Pos() - return cmp < 0 || cmp == 0 && a[i].String() < a[j].String() -} -func (a membersByPosAndString) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type sorterrorType []*errorType - -func (a sorterrorType) Len() int { return len(a) } -func (a sorterrorType) Less(i, j int) bool { - cmp := a[i].obj.Pos() - a[j].obj.Pos() - return cmp < 0 || cmp == 0 && a[i].typ.String() < a[j].typ.String() -} -func (a sorterrorType) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type errorType struct { - typ types.Type // concrete type N or *N that implements error - obj *types.TypeName // the named type N -} - -type whicherrsResult struct { - qpos *queryPos - errpos token.Pos - globals []ssa.Member - consts []ssa.Member - types []*errorType -} - -func (r *whicherrsResult) PrintPlain(printf printfFunc) { - if len(r.globals) > 0 { - printf(r.qpos, "this error may point to these globals:") - for _, g := range r.globals { - printf(g.Pos(), "\t%s", g.RelString(r.qpos.info.Pkg)) - } - } - if len(r.consts) > 0 { - printf(r.qpos, "this error may contain these constants:") - for _, c := range r.consts { - printf(c.Pos(), "\t%s", c.RelString(r.qpos.info.Pkg)) - } - } - if len(r.types) > 0 { - printf(r.qpos, "this error may contain these dynamic types:") - for _, t := range r.types { - printf(t.obj.Pos(), "\t%s", r.qpos.typeString(t.typ)) - } - } -} - -func (r *whicherrsResult) JSON(fset *token.FileSet) []byte { - we := &serial.WhichErrs{} - we.ErrPos = fset.Position(r.errpos).String() - for _, g := range r.globals { - we.Globals = append(we.Globals, fset.Position(g.Pos()).String()) - } - for _, c := range r.consts { - we.Constants = append(we.Constants, fset.Position(c.Pos()).String()) - } - for _, t := range r.types { - var et serial.WhichErrsType - et.Type = r.qpos.typeString(t.typ) - et.Position = fset.Position(t.obj.Pos()).String() - we.Types = append(we.Types, et) - } - return toJSON(we) -} diff --git a/go/callgraph/callgraph_test.go b/go/callgraph/callgraph_test.go index dd6baafa5ec..aa8aca08597 100644 --- a/go/callgraph/callgraph_test.go +++ b/go/callgraph/callgraph_test.go @@ -14,7 +14,6 @@ import ( "golang.org/x/tools/go/callgraph/static" "golang.org/x/tools/go/callgraph/vta" "golang.org/x/tools/go/loader" - "golang.org/x/tools/go/pointer" "golang.org/x/tools/go/ssa" "golang.org/x/tools/go/ssa/ssautil" ) @@ -139,21 +138,6 @@ func BenchmarkRTA(b *testing.B) { } } -func BenchmarkPTA(b *testing.B) { - b.StopTimer() - _, main := example() - b.StartTimer() - - for i := 0; i < b.N; i++ { - config := &pointer.Config{Mains: []*ssa.Package{main.Pkg}, BuildCallGraph: true} - res, err := pointer.Analyze(config) - if err != nil { - b.Fatal(err) - } - logStats(b, i == 0, "pta", res.CallGraph, main) - } -} - func BenchmarkVTA(b *testing.B) { b.StopTimer() prog, main := example() From a260315e300ac46cc74b461bb418e4a800ee55bf Mon Sep 17 
00:00:00 2001 From: Alan Donovan Date: Wed, 31 May 2023 14:51:48 -0400 Subject: [PATCH 083/109] go/pointer: create submodule The pointer analysis is deprecated. Once this module is tagged, we will delete it. Clients will be able to use only the last version. Updates golang/go#59676 Change-Id: If6e2214d964f0bbd1267634a1ad5c0d3166ecf0e Reviewed-on: https://go-review.googlesource.com/c/tools/+/499695 Run-TryBot: Alan Donovan Reviewed-by: Hyang-Ah Hana Kim Reviewed-by: Robert Findley TryBot-Result: Gopher Robot Auto-Submit: Alan Donovan --- go/pointer/go.mod | 12 ++++++++++++ go/pointer/go.sum | 5 +++++ 2 files changed, 17 insertions(+) create mode 100644 go/pointer/go.mod create mode 100644 go/pointer/go.sum diff --git a/go/pointer/go.mod b/go/pointer/go.mod new file mode 100644 index 00000000000..f2d9be9262c --- /dev/null +++ b/go/pointer/go.mod @@ -0,0 +1,12 @@ +module golang.org/x/tools/go/pointer + +go 1.18 // tagx:compat 1.16 + +require ( + golang.org/x/sys v0.8.0 + golang.org/x/tools v0.9.2 +) + +require golang.org/x/mod v0.10.0 // indirect + +replace golang.org/x/tools => ../../ diff --git a/go/pointer/go.sum b/go/pointer/go.sum new file mode 100644 index 00000000000..7d386a87bf5 --- /dev/null +++ b/go/pointer/go.sum @@ -0,0 +1,5 @@ +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 98f1b4dee82ca3ded542b3c37a62f58f0eb744b7 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Wed, 31 May 2023 17:58:19 -0400 Subject: [PATCH 084/109] gopls/internal/lsp/cache: check number of orphaned files after filtering I noticed redundant "reloadOrphanedFiles reloading" logs in https://storage.googleapis.com/go-build-log/0b9348fc/openbsd-amd64-72_04342286.log This is because we were checking for no reloadable files before filtering out unloadable files, not after. Fix this logic error. Change-Id: Ib7ad122bb7f96fdf53474c329fac1ec8ec0e1ef3 Reviewed-on: https://go-review.googlesource.com/c/tools/+/499755 TryBot-Result: Gopher Robot Run-TryBot: Robert Findley gopls-CI: kokoro Reviewed-by: Alan Donovan --- gopls/internal/lsp/cache/snapshot.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index 7e0a9ba196b..bff3dc17f63 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -1605,9 +1605,6 @@ func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error { files = append(files, o) } } - if len(files) == 0 { - return nil - } // Filter to files that are not known to be unloadable. s.mu.Lock() @@ -1620,6 +1617,10 @@ func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error { files = loadable s.mu.Unlock() + if len(files) == 0 { + return nil + } + var uris []span.URI for _, file := range files { uris = append(uris, file.URI()) From 0dda7d614e545bb93966bb1410269b3cd1e65e1d Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 31 May 2023 18:05:46 -0400 Subject: [PATCH 085/109] go/pointer: remove replace directive This makes it so that 'go install golang.org/x/tools/go/pointer@latest' will continue to work. ('go install pkg@version' does not allow replace directives.) 
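As a rough illustration (hypothetical module, not taken from this change), 'go
install example.com/tool@latest' refuses a go.mod of the following shape because
of its replace directive; the fix, as in the diff below, is to drop the replace
and require a published pseudo-version of the replaced module instead:

	module example.com/tool

	go 1.18

	require golang.org/x/tools v0.9.2

	// 'go install pkg@version' rejects any module whose go.mod contains
	// replace (or exclude) directives, so this line has to go.
	replace golang.org/x/tools => ../..
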
Updates golang/go#59676 Change-Id: I7dcb95a5730bd68d42a5873f9f9cec6bef773205 Reviewed-on: https://go-review.googlesource.com/c/tools/+/499756 gopls-CI: kokoro Run-TryBot: Alan Donovan Reviewed-by: Robert Findley TryBot-Result: Gopher Robot --- go/pointer/go.mod | 10 ++++------ go/pointer/go.sum | 2 ++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go/pointer/go.mod b/go/pointer/go.mod index f2d9be9262c..168aa253859 100644 --- a/go/pointer/go.mod +++ b/go/pointer/go.mod @@ -2,11 +2,9 @@ module golang.org/x/tools/go/pointer go 1.18 // tagx:compat 1.16 +require golang.org/x/sys v0.8.0 + require ( - golang.org/x/sys v0.8.0 - golang.org/x/tools v0.9.2 + golang.org/x/mod v0.10.0 // indirect + golang.org/x/tools v0.9.2-0.20230531220058-a260315e300a ) - -require golang.org/x/mod v0.10.0 // indirect - -replace golang.org/x/tools => ../../ diff --git a/go/pointer/go.sum b/go/pointer/go.sum index 7d386a87bf5..c7c351dba1d 100644 --- a/go/pointer/go.sum +++ b/go/pointer/go.sum @@ -3,3 +3,5 @@ golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/tools v0.9.2-0.20230531220058-a260315e300a h1:rym71QNKHeCt6OA9UbKSr3jmBnbRNACMDLX5zI18ZOk= +golang.org/x/tools v0.9.2-0.20230531220058-a260315e300a/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= From 77fd064f3b7487544bbbafc80b59080134571cc7 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Mon, 29 May 2023 21:37:17 -0400 Subject: [PATCH 086/109] go/types/objectpath: remove unnecessary unsafe import Change-Id: Id4b48009227c69619538ad1892334c9ae822f7f3 Reviewed-on: https://go-review.googlesource.com/c/tools/+/499796 TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan gopls-CI: kokoro Run-TryBot: Robert Findley --- go/types/objectpath/objectpath.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/go/types/objectpath/objectpath.go b/go/types/objectpath/objectpath.go index aa7dfaccf56..6cbb663b6b7 100644 --- a/go/types/objectpath/objectpath.go +++ b/go/types/objectpath/objectpath.go @@ -31,8 +31,6 @@ import ( "strings" "golang.org/x/tools/internal/typeparams" - - _ "unsafe" // for go:linkname ) // A Path is an opaque name that identifies a types.Object From c6d86c4f3063116c137b3ee5d5c3d5f9f251c473 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 31 May 2023 18:08:32 -0400 Subject: [PATCH 087/109] go/pointer: delete package Fixes golang/go#59676 Change-Id: Ibd4ffa3e4571ddc01d482e93147bbc4cef55cc6d Reviewed-on: https://go-review.googlesource.com/c/tools/+/499757 Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Robert Findley gopls-CI: kokoro --- go/pointer/TODO | 33 - go/pointer/analysis.go | 462 ------ go/pointer/api.go | 287 ---- go/pointer/callgraph.go | 60 - go/pointer/constraint.go | 149 -- go/pointer/doc.go | 625 -------- go/pointer/example_test.go | 125 -- go/pointer/gen.go | 1364 ------------------ go/pointer/go.mod | 10 - go/pointer/go.sum | 7 - go/pointer/hvn.go | 968 ------------- go/pointer/intrinsics.go | 359 ----- go/pointer/labels.go | 150 -- go/pointer/opt.go | 131 -- go/pointer/pointer_go117_test.go | 49 - go/pointer/pointer_race_test.go | 12 - go/pointer/pointer_test.go | 730 ---------- go/pointer/print.go | 43 - go/pointer/query.go | 225 --- go/pointer/query_test.go | 80 -- go/pointer/reflect.go | 1973 -------------------------- 
go/pointer/solve.go | 366 ----- go/pointer/stdlib_test.go | 105 -- go/pointer/testdata/a_test.go | 44 - go/pointer/testdata/another.go | 37 - go/pointer/testdata/arrayreflect.go | 192 --- go/pointer/testdata/arrays.go | 98 -- go/pointer/testdata/arrays_go117.go | 173 --- go/pointer/testdata/channels.go | 119 -- go/pointer/testdata/chanreflect.go | 86 -- go/pointer/testdata/chanreflect1.go | 35 - go/pointer/testdata/context.go | 49 - go/pointer/testdata/conv.go | 64 - go/pointer/testdata/extended.go | 22 - go/pointer/testdata/finalizer.go | 89 -- go/pointer/testdata/flow.go | 64 - go/pointer/testdata/fmtexcerpt.go | 43 - go/pointer/testdata/func.go | 206 --- go/pointer/testdata/funcreflect.go | 131 -- go/pointer/testdata/hello.go | 28 - go/pointer/testdata/interfaces.go | 153 -- go/pointer/testdata/issue9002.go | 17 - go/pointer/testdata/mapreflect.go | 118 -- go/pointer/testdata/maps.go | 109 -- go/pointer/testdata/panic.go | 37 - go/pointer/testdata/recur.go | 12 - go/pointer/testdata/reflect.go | 118 -- go/pointer/testdata/rtti.go | 29 - go/pointer/testdata/structreflect.go | 45 - go/pointer/testdata/structs.go | 101 -- go/pointer/testdata/timer.go | 24 - go/pointer/testdata/typeparams.go | 68 - go/pointer/util.go | 315 ---- 53 files changed, 10939 deletions(-) delete mode 100644 go/pointer/TODO delete mode 100644 go/pointer/analysis.go delete mode 100644 go/pointer/api.go delete mode 100644 go/pointer/callgraph.go delete mode 100644 go/pointer/constraint.go delete mode 100644 go/pointer/doc.go delete mode 100644 go/pointer/example_test.go delete mode 100644 go/pointer/gen.go delete mode 100644 go/pointer/go.mod delete mode 100644 go/pointer/go.sum delete mode 100644 go/pointer/hvn.go delete mode 100644 go/pointer/intrinsics.go delete mode 100644 go/pointer/labels.go delete mode 100644 go/pointer/opt.go delete mode 100644 go/pointer/pointer_go117_test.go delete mode 100644 go/pointer/pointer_race_test.go delete mode 100644 go/pointer/pointer_test.go delete mode 100644 go/pointer/print.go delete mode 100644 go/pointer/query.go delete mode 100644 go/pointer/query_test.go delete mode 100644 go/pointer/reflect.go delete mode 100644 go/pointer/solve.go delete mode 100644 go/pointer/stdlib_test.go delete mode 100644 go/pointer/testdata/a_test.go delete mode 100644 go/pointer/testdata/another.go delete mode 100644 go/pointer/testdata/arrayreflect.go delete mode 100644 go/pointer/testdata/arrays.go delete mode 100644 go/pointer/testdata/arrays_go117.go delete mode 100644 go/pointer/testdata/channels.go delete mode 100644 go/pointer/testdata/chanreflect.go delete mode 100644 go/pointer/testdata/chanreflect1.go delete mode 100644 go/pointer/testdata/context.go delete mode 100644 go/pointer/testdata/conv.go delete mode 100644 go/pointer/testdata/extended.go delete mode 100644 go/pointer/testdata/finalizer.go delete mode 100644 go/pointer/testdata/flow.go delete mode 100644 go/pointer/testdata/fmtexcerpt.go delete mode 100644 go/pointer/testdata/func.go delete mode 100644 go/pointer/testdata/funcreflect.go delete mode 100644 go/pointer/testdata/hello.go delete mode 100644 go/pointer/testdata/interfaces.go delete mode 100644 go/pointer/testdata/issue9002.go delete mode 100644 go/pointer/testdata/mapreflect.go delete mode 100644 go/pointer/testdata/maps.go delete mode 100644 go/pointer/testdata/panic.go delete mode 100644 go/pointer/testdata/recur.go delete mode 100644 go/pointer/testdata/reflect.go delete mode 100644 go/pointer/testdata/rtti.go delete mode 100644 go/pointer/testdata/structreflect.go 
delete mode 100644 go/pointer/testdata/structs.go delete mode 100644 go/pointer/testdata/timer.go delete mode 100644 go/pointer/testdata/typeparams.go delete mode 100644 go/pointer/util.go diff --git a/go/pointer/TODO b/go/pointer/TODO deleted file mode 100644 index f95e70621d9..00000000000 --- a/go/pointer/TODO +++ /dev/null @@ -1,33 +0,0 @@ --*- text -*- - -Pointer analysis to-do list -=========================== - -CONSTRAINT GENERATION: -- support reflection: - - a couple of operators are missing - - reflect.Values may contain lvalues (CanAddr) -- implement native intrinsics. These vary by platform. -- add to pts(a.panic) a label representing all runtime panics, e.g. - runtime.{TypeAssertionError,errorString,errorCString}. - -OPTIMISATIONS -- pre-solver: - pointer equivalence: extend HVN to HRU - location equivalence -- solver: HCD, LCD. -- experiment with map+slice worklist in lieu of bitset. - It may have faster insert. - -MISC: -- Test on all platforms. - Currently we assume these go/build tags: linux, amd64, !cgo. - -MAINTAINABILITY -- Think about ways to make debugging this code easier. PTA logs - routinely exceed a million lines and require training to read. - -BUGS: -- There's a crash bug in stdlib_test + reflection, rVCallConstraint. - - diff --git a/go/pointer/analysis.go b/go/pointer/analysis.go deleted file mode 100644 index e3c85ede4f7..00000000000 --- a/go/pointer/analysis.go +++ /dev/null @@ -1,462 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pointer - -// This file defines the main datatypes and Analyze function of the pointer analysis. - -import ( - "fmt" - "go/token" - "go/types" - "io" - "os" - "reflect" - "runtime" - "runtime/debug" - "sort" - "strings" - - "golang.org/x/tools/go/callgraph" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/types/typeutil" -) - -const ( - // optimization options; enable all when committing - optRenumber = true // enable renumbering optimization (makes logs hard to read) - optHVN = true // enable pointer equivalence via Hash-Value Numbering - - // debugging options; disable all when committing - debugHVN = false // enable assertions in HVN - debugHVNVerbose = false // enable extra HVN logging - debugHVNCrossCheck = false // run solver with/without HVN and compare (caveats below) - debugTimers = false // show running time of each phase -) - -// object.flags bitmask values. -const ( - otTagged = 1 << iota // type-tagged object - otIndirect // type-tagged object with indirect payload - otFunction // function object -) - -// An object represents a contiguous block of memory to which some -// (generalized) pointer may point. -// -// (Note: most variables called 'obj' are not *objects but nodeids -// such that a.nodes[obj].obj != nil.) -type object struct { - // flags is a bitset of the node type (ot*) flags defined above. - flags uint32 - - // Number of following nodes belonging to the same "object" - // allocation. Zero for all other nodes. - size uint32 - - // data describes this object; it has one of these types: - // - // ssa.Value for an object allocated by an SSA operation. - // types.Type for an rtype instance object or *rtype-tagged object. - // string for an intrinsic object, e.g. the array behind os.Args. - // nil for an object allocated by an intrinsic. - // (cgn provides the identity of the intrinsic.) 
- data interface{} - - // The call-graph node (=context) in which this object was allocated. - // May be nil for global objects: Global, Const, some Functions. - cgn *cgnode -} - -// nodeid denotes a node. -// It is an index within analysis.nodes. -// We use small integers, not *node pointers, for many reasons: -// - they are smaller on 64-bit systems. -// - sets of them can be represented compactly in bitvectors or BDDs. -// - order matters; a field offset can be computed by simple addition. -type nodeid uint32 - -// A node is an equivalence class of memory locations. -// Nodes may be pointers, pointed-to locations, neither, or both. -// -// Nodes that are pointed-to locations ("labels") have an enclosing -// object (see analysis.enclosingObject). -type node struct { - // If non-nil, this node is the start of an object - // (addressable memory location). - // The following obj.size nodes implicitly belong to the object; - // they locate their object by scanning back. - obj *object - - // The type of the field denoted by this node. Non-aggregate, - // unless this is an tagged.T node (i.e. the thing - // pointed to by an interface) in which case typ is that type. - typ types.Type - - // subelement indicates which directly embedded subelement of - // an object of aggregate type (struct, tuple, array) this is. - subelement *fieldInfo // e.g. ".a.b[*].c" - - // Solver state for the canonical node of this pointer- - // equivalence class. Each node is created with its own state - // but they become shared after HVN. - solve *solverState -} - -// An analysis instance holds the state of a single pointer analysis problem. -type analysis struct { - config *Config // the client's control/observer interface - prog *ssa.Program // the program being analyzed - log io.Writer // log stream; nil to disable - panicNode nodeid // sink for panic, source for recover - nodes []*node // indexed by nodeid - flattenMemo map[types.Type][]*fieldInfo // memoization of flatten() - trackTypes map[types.Type]bool // memoization of shouldTrack() - constraints []constraint // set of constraints - cgnodes []*cgnode // all cgnodes - genq []*cgnode // queue of functions to generate constraints for - intrinsics map[*ssa.Function]intrinsic // non-nil values are summaries for intrinsic fns - globalval map[ssa.Value]nodeid // node for each global ssa.Value - globalobj map[ssa.Value]nodeid // maps v to sole member of pts(v), if singleton - localval map[ssa.Value]nodeid // node for each local ssa.Value - localobj map[ssa.Value]nodeid // maps v to sole member of pts(v), if singleton - atFuncs map[*ssa.Function]bool // address-taken functions (for presolver) - mapValues []nodeid // values of makemap objects (indirect in HVN) - work nodeset // solver's worklist - result *Result // results of the analysis - track track // pointerlike types whose aliasing we track - deltaSpace []int // working space for iterating over PTS deltas - - // Reflection & intrinsics: - hasher typeutil.Hasher // cache of type hashes - reflectValueObj types.Object // type symbol for reflect.Value (if present) - reflectValueCall *ssa.Function // (reflect.Value).Call - reflectRtypeObj types.Object // *types.TypeName for reflect.rtype (if present) - reflectRtypePtr *types.Pointer // *reflect.rtype - reflectType *types.Named // reflect.Type - rtypes typeutil.Map // nodeid of canonical *rtype-tagged object for type T - reflectZeros typeutil.Map // nodeid of canonical T-tagged object for zero value - runtimeSetFinalizer *ssa.Function // runtime.SetFinalizer -} - -// 
enclosingObj returns the first node of the addressable memory -// object that encloses node id. Panic ensues if that node does not -// belong to any object. -func (a *analysis) enclosingObj(id nodeid) nodeid { - // Find previous node with obj != nil. - for i := id; i >= 0; i-- { - n := a.nodes[i] - if obj := n.obj; obj != nil { - if i+nodeid(obj.size) <= id { - break // out of bounds - } - return i - } - } - panic("node has no enclosing object") -} - -// labelFor returns the Label for node id. -// Panic ensues if that node is not addressable. -func (a *analysis) labelFor(id nodeid) *Label { - return &Label{ - obj: a.nodes[a.enclosingObj(id)].obj, - subelement: a.nodes[id].subelement, - } -} - -func (a *analysis) warnf(pos token.Pos, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if a.log != nil { - fmt.Fprintf(a.log, "%s: warning: %s\n", a.prog.Fset.Position(pos), msg) - } - a.result.Warnings = append(a.result.Warnings, Warning{pos, msg}) -} - -// computeTrackBits sets a.track to the necessary 'track' bits for the pointer queries. -func (a *analysis) computeTrackBits() { - if len(a.config.extendedQueries) != 0 { - // TODO(dh): only track the types necessary for the query. - a.track = trackAll - return - } - var queryTypes []types.Type - for v := range a.config.Queries { - queryTypes = append(queryTypes, v.Type()) - } - for v := range a.config.IndirectQueries { - queryTypes = append(queryTypes, mustDeref(v.Type())) - } - for _, t := range queryTypes { - switch t.Underlying().(type) { - case *types.Chan: - a.track |= trackChan - case *types.Map: - a.track |= trackMap - case *types.Pointer: - a.track |= trackPtr - case *types.Slice: - a.track |= trackSlice - case *types.Interface: - a.track = trackAll - return - } - if rVObj := a.reflectValueObj; rVObj != nil && types.Identical(t, rVObj.Type()) { - a.track = trackAll - return - } - } -} - -// Analyze runs the pointer analysis with the scope and options -// specified by config, and returns the (synthetic) root of the callgraph. -// -// Pointer analysis of a transitively closed well-typed program should -// always succeed. An error can occur only due to an internal bug. -func Analyze(config *Config) (result *Result, err error) { - if config.Mains == nil { - return nil, fmt.Errorf("no main/test packages to analyze (check $GOROOT/$GOPATH)") - } - defer func() { - if p := recover(); p != nil { - err = fmt.Errorf("internal error in pointer analysis: %v (please report this bug)", p) - fmt.Fprintln(os.Stderr, "Internal panic in pointer analysis:") - debug.PrintStack() - } - }() - - a := &analysis{ - config: config, - log: config.Log, - prog: config.prog(), - globalval: make(map[ssa.Value]nodeid), - globalobj: make(map[ssa.Value]nodeid), - flattenMemo: make(map[types.Type][]*fieldInfo), - trackTypes: make(map[types.Type]bool), - atFuncs: make(map[*ssa.Function]bool), - hasher: typeutil.MakeHasher(), - intrinsics: make(map[*ssa.Function]intrinsic), - result: &Result{ - Queries: make(map[ssa.Value]Pointer), - IndirectQueries: make(map[ssa.Value]Pointer), - }, - deltaSpace: make([]int, 0, 100), - } - - if false { - a.log = os.Stderr // for debugging crashes; extremely verbose - } - - if a.log != nil { - fmt.Fprintln(a.log, "==== Starting analysis") - } - - // Pointer analysis requires a complete program for soundness. - // Check to prevent accidental misconfiguration. 
- for _, pkg := range a.prog.AllPackages() { - // (This only checks that the package scope is complete, - // not that func bodies exist, but it's a good signal.) - if !pkg.Pkg.Complete() { - return nil, fmt.Errorf(`pointer analysis requires a complete program yet package %q was incomplete`, pkg.Pkg.Path()) - } - } - - if reflect := a.prog.ImportedPackage("reflect"); reflect != nil { - rV := reflect.Pkg.Scope().Lookup("Value") - a.reflectValueObj = rV - a.reflectValueCall = a.prog.LookupMethod(rV.Type(), nil, "Call") - a.reflectType = reflect.Pkg.Scope().Lookup("Type").Type().(*types.Named) - a.reflectRtypeObj = reflect.Pkg.Scope().Lookup("rtype") - a.reflectRtypePtr = types.NewPointer(a.reflectRtypeObj.Type()) - - // Override flattening of reflect.Value, treating it like a basic type. - tReflectValue := a.reflectValueObj.Type() - a.flattenMemo[tReflectValue] = []*fieldInfo{{typ: tReflectValue}} - - // Override shouldTrack of reflect.Value and *reflect.rtype. - // Always track pointers of these types. - a.trackTypes[tReflectValue] = true - a.trackTypes[a.reflectRtypePtr] = true - - a.rtypes.SetHasher(a.hasher) - a.reflectZeros.SetHasher(a.hasher) - } - if runtime := a.prog.ImportedPackage("runtime"); runtime != nil { - a.runtimeSetFinalizer = runtime.Func("SetFinalizer") - } - a.computeTrackBits() - - a.generate() - a.showCounts() - - if optRenumber { - a.renumber() - } - - N := len(a.nodes) // excludes solver-created nodes - - if optHVN { - if debugHVNCrossCheck { - // Cross-check: run the solver once without - // optimization, once with, and compare the - // solutions. - savedConstraints := a.constraints - - a.solve() - a.dumpSolution("A.pts", N) - - // Restore. - a.constraints = savedConstraints - for _, n := range a.nodes { - n.solve = new(solverState) - } - a.nodes = a.nodes[:N] - - // rtypes is effectively part of the solver state. - a.rtypes = typeutil.Map{} - a.rtypes.SetHasher(a.hasher) - } - - a.hvn() - } - - if debugHVNCrossCheck { - runtime.GC() - runtime.GC() - } - - a.solve() - - // Compare solutions. - if optHVN && debugHVNCrossCheck { - a.dumpSolution("B.pts", N) - - if !diff("A.pts", "B.pts") { - return nil, fmt.Errorf("internal error: optimization changed solution") - } - } - - // Create callgraph.Nodes in deterministic order. - if cg := a.result.CallGraph; cg != nil { - for _, caller := range a.cgnodes { - cg.CreateNode(caller.fn) - } - } - - // Add dynamic edges to call graph. - var space [100]int - for _, caller := range a.cgnodes { - for _, site := range caller.sites { - for _, callee := range a.nodes[site.targets].solve.pts.AppendTo(space[:0]) { - a.callEdge(caller, site, nodeid(callee)) - } - } - } - - return a.result, nil -} - -// callEdge is called for each edge in the callgraph. -// calleeid is the callee's object node (has otFunction flag). -func (a *analysis) callEdge(caller *cgnode, site *callsite, calleeid nodeid) { - obj := a.nodes[calleeid].obj - if obj.flags&otFunction == 0 { - panic(fmt.Sprintf("callEdge %s -> n%d: not a function object", site, calleeid)) - } - callee := obj.cgn - - if cg := a.result.CallGraph; cg != nil { - // TODO(adonovan): opt: I would expect duplicate edges - // (to wrappers) to arise due to the elimination of - // context information, but I haven't observed any. - // Understand this better. - callgraph.AddEdge(cg.CreateNode(caller.fn), site.instr, cg.CreateNode(callee.fn)) - } - - if a.log != nil { - fmt.Fprintf(a.log, "\tcall edge %s -> %s\n", site, callee) - } - - // Warn about calls to functions that are handled unsoundly. 
- // TODO(adonovan): de-dup these messages. - fn := callee.fn - - // Warn about calls to non-intrinsic external functions. - if fn.Blocks == nil && a.findIntrinsic(fn) == nil { - a.warnf(site.pos(), "unsound call to unknown intrinsic: %s", fn) - a.warnf(fn.Pos(), " (declared here)") - } - - // Warn about calls to generic function bodies. - if fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0 { - a.warnf(site.pos(), "unsound call to generic function body: %s (build with ssa.InstantiateGenerics)", fn) - a.warnf(fn.Pos(), " (declared here)") - } - - // Warn about calls to instantiation wrappers of generics functions. - if fn.Origin() != nil && strings.HasPrefix(fn.Synthetic, "instantiation wrapper ") { - a.warnf(site.pos(), "unsound call to instantiation wrapper of generic: %s (build with ssa.InstantiateGenerics)", fn) - a.warnf(fn.Pos(), " (declared here)") - } -} - -// dumpSolution writes the PTS solution to the specified file. -// -// It only dumps the nodes that existed before solving. The order in -// which solver-created nodes are created depends on pre-solver -// optimization, so we can't include them in the cross-check. -func (a *analysis) dumpSolution(filename string, N int) { - f, err := os.Create(filename) - if err != nil { - panic(err) - } - for id, n := range a.nodes[:N] { - if _, err := fmt.Fprintf(f, "pts(n%d) = {", id); err != nil { - panic(err) - } - var sep string - for _, l := range n.solve.pts.AppendTo(a.deltaSpace) { - if l >= N { - break - } - fmt.Fprintf(f, "%s%d", sep, l) - sep = " " - } - fmt.Fprintf(f, "} : %s\n", n.typ) - } - if err := f.Close(); err != nil { - panic(err) - } -} - -// showCounts logs the size of the constraint system. A typical -// optimized distribution is 65% copy, 13% load, 11% addr, 5% -// offsetAddr, 4% store, 2% others. -func (a *analysis) showCounts() { - if a.log != nil { - counts := make(map[reflect.Type]int) - for _, c := range a.constraints { - counts[reflect.TypeOf(c)]++ - } - fmt.Fprintf(a.log, "# constraints:\t%d\n", len(a.constraints)) - var lines []string - for t, n := range counts { - line := fmt.Sprintf("%7d (%2d%%)\t%s", n, 100*n/len(a.constraints), t) - lines = append(lines, line) - } - sort.Sort(sort.Reverse(sort.StringSlice(lines))) - for _, line := range lines { - fmt.Fprintf(a.log, "\t%s\n", line) - } - - fmt.Fprintf(a.log, "# nodes:\t%d\n", len(a.nodes)) - - // Show number of pointer equivalence classes. - m := make(map[*solverState]bool) - for _, n := range a.nodes { - m[n.solve] = true - } - fmt.Fprintf(a.log, "# ptsets:\t%d\n", len(m)) - } -} diff --git a/go/pointer/api.go b/go/pointer/api.go deleted file mode 100644 index 8c9a8c7752b..00000000000 --- a/go/pointer/api.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pointer - -import ( - "bytes" - "fmt" - "go/token" - "io" - - "golang.org/x/tools/container/intsets" - "golang.org/x/tools/go/callgraph" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/types/typeutil" -) - -// A Config formulates a pointer analysis problem for Analyze. It is -// only usable for a single invocation of Analyze and must not be -// reused. -type Config struct { - // Mains contains the set of 'main' packages to analyze - // Clients must provide the analysis with at least one - // package defining a main() function. 
- // - // Non-main packages in the ssa.Program that are not - // dependencies of any main package may still affect the - // analysis result, because they contribute runtime types and - // thus methods. - // - // TODO(adonovan): investigate whether this is desirable. - // - // Calls to generic functions will be unsound unless packages - // are built using the ssa.InstantiateGenerics builder mode. - Mains []*ssa.Package - - // Reflection determines whether to handle reflection - // operators soundly, which is currently rather slow since it - // causes constraint to be generated during solving - // proportional to the number of constraint variables, which - // has not yet been reduced by presolver optimisation. - Reflection bool - - // BuildCallGraph determines whether to construct a callgraph. - // If enabled, the graph will be available in Result.CallGraph. - BuildCallGraph bool - - // The client populates Queries[v] or IndirectQueries[v] - // for each ssa.Value v of interest, to request that the - // points-to sets pts(v) or pts(*v) be computed. If the - // client needs both points-to sets, v may appear in both - // maps. - // - // (IndirectQueries is typically used for Values corresponding - // to source-level lvalues, e.g. an *ssa.Global.) - // - // The analysis populates the corresponding - // Result.{Indirect,}Queries map when it creates the pointer - // variable for v or *v. Upon completion the client can - // inspect that map for the results. - // - // TODO(adonovan): this API doesn't scale well for batch tools - // that want to dump the entire solution. Perhaps optionally - // populate a map[*ssa.DebugRef]Pointer in the Result, one - // entry per source expression. - // - Queries map[ssa.Value]struct{} - IndirectQueries map[ssa.Value]struct{} - extendedQueries map[ssa.Value][]*extendedQuery - - // If Log is non-nil, log messages are written to it. - // Logging is extremely verbose. - Log io.Writer -} - -type track uint32 - -const ( - trackChan track = 1 << iota // track 'chan' references - trackMap // track 'map' references - trackPtr // track regular pointers - trackSlice // track slice references - - trackAll = ^track(0) -) - -// AddQuery adds v to Config.Queries. -// Precondition: CanPoint(v.Type()). -func (c *Config) AddQuery(v ssa.Value) { - if !CanPoint(v.Type()) { - panic(fmt.Sprintf("%s is not a pointer-like value: %s", v, v.Type())) - } - if c.Queries == nil { - c.Queries = make(map[ssa.Value]struct{}) - } - c.Queries[v] = struct{}{} -} - -// AddIndirectQuery adds v to Config.IndirectQueries. -// Precondition: CanPoint(v.Type().Underlying().(*types.Pointer).Elem()). -func (c *Config) AddIndirectQuery(v ssa.Value) { - if c.IndirectQueries == nil { - c.IndirectQueries = make(map[ssa.Value]struct{}) - } - if !CanPoint(mustDeref(v.Type())) { - panic(fmt.Sprintf("%s is not the address of a pointer-like value: %s", v, v.Type())) - } - c.IndirectQueries[v] = struct{}{} -} - -// AddExtendedQuery adds an extended, AST-based query on v to the -// analysis. The query, which must be a single Go expression, allows -// destructuring the value. -// -// The query must operate on a variable named 'x', which represents -// the value, and result in a pointer-like object. Only a subset of -// Go expressions are permitted in queries, namely channel receives, -// pointer dereferences, field selectors, array/slice/map/tuple -// indexing and grouping with parentheses. The specific indices when -// indexing arrays, slices and maps have no significance. 
Indices used -// on tuples must be numeric and within bounds. -// -// All field selectors must be explicit, even ones usually elided -// due to promotion of embedded fields. -// -// The query 'x' is identical to using AddQuery. The query '*x' is -// identical to using AddIndirectQuery. -// -// On success, AddExtendedQuery returns a Pointer to the queried -// value. This Pointer will be initialized during analysis. Using it -// before analysis has finished has undefined behavior. -// -// Example: -// -// // given v, which represents a function call to 'fn() (int, []*T)', and -// // 'type T struct { F *int }', the following query will access the field F. -// c.AddExtendedQuery(v, "x[1][0].F") -func (c *Config) AddExtendedQuery(v ssa.Value, query string) (*Pointer, error) { - ops, _, err := parseExtendedQuery(v.Type(), query) - if err != nil { - return nil, fmt.Errorf("invalid query %q: %s", query, err) - } - if c.extendedQueries == nil { - c.extendedQueries = make(map[ssa.Value][]*extendedQuery) - } - - ptr := &Pointer{} - c.extendedQueries[v] = append(c.extendedQueries[v], &extendedQuery{ops: ops, ptr: ptr}) - return ptr, nil -} - -func (c *Config) prog() *ssa.Program { - for _, main := range c.Mains { - return main.Prog - } - panic("empty scope") -} - -type Warning struct { - Pos token.Pos - Message string -} - -// A Result contains the results of a pointer analysis. -// -// See Config for how to request the various Result components. -type Result struct { - CallGraph *callgraph.Graph // discovered call graph - Queries map[ssa.Value]Pointer // pts(v) for each v in Config.Queries. - IndirectQueries map[ssa.Value]Pointer // pts(*v) for each v in Config.IndirectQueries. - Warnings []Warning // warnings of unsoundness -} - -// A Pointer is an equivalence class of pointer-like values. -// -// A Pointer doesn't have a unique type because pointers of distinct -// types may alias the same object. -type Pointer struct { - a *analysis - n nodeid -} - -// A PointsToSet is a set of labels (locations or allocations). -type PointsToSet struct { - a *analysis // may be nil if pts is nil - pts *nodeset -} - -func (s PointsToSet) String() string { - var buf bytes.Buffer - buf.WriteByte('[') - if s.pts != nil { - var space [50]int - for i, l := range s.pts.AppendTo(space[:0]) { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(s.a.labelFor(nodeid(l)).String()) - } - } - buf.WriteByte(']') - return buf.String() -} - -// PointsTo returns the set of labels that this points-to set -// contains. -func (s PointsToSet) Labels() []*Label { - var labels []*Label - if s.pts != nil { - var space [50]int - for _, l := range s.pts.AppendTo(space[:0]) { - labels = append(labels, s.a.labelFor(nodeid(l))) - } - } - return labels -} - -// If this PointsToSet came from a Pointer of interface kind -// or a reflect.Value, DynamicTypes returns the set of dynamic -// types that it may contain. (For an interface, they will -// always be concrete types.) -// -// The result is a mapping whose keys are the dynamic types to which -// it may point. For each pointer-like key type, the corresponding -// map value is the PointsToSet for pointers of that type. -// -// The result is empty unless CanHaveDynamicTypes(T). 
-func (s PointsToSet) DynamicTypes() *typeutil.Map { - var tmap typeutil.Map - tmap.SetHasher(s.a.hasher) - if s.pts != nil { - var space [50]int - for _, x := range s.pts.AppendTo(space[:0]) { - ifaceObjID := nodeid(x) - if !s.a.isTaggedObject(ifaceObjID) { - continue // !CanHaveDynamicTypes(tDyn) - } - tDyn, v, indirect := s.a.taggedValue(ifaceObjID) - if indirect { - panic("indirect tagged object") // implement later - } - pts, ok := tmap.At(tDyn).(PointsToSet) - if !ok { - pts = PointsToSet{s.a, new(nodeset)} - tmap.Set(tDyn, pts) - } - pts.pts.addAll(&s.a.nodes[v].solve.pts) - } - } - return &tmap -} - -// Intersects reports whether this points-to set and the -// argument points-to set contain common members. -func (s PointsToSet) Intersects(y PointsToSet) bool { - if s.pts == nil || y.pts == nil { - return false - } - // This takes Θ(|x|+|y|) time. - var z intsets.Sparse - z.Intersection(&s.pts.Sparse, &y.pts.Sparse) - return !z.IsEmpty() -} - -func (p Pointer) String() string { - return fmt.Sprintf("n%d", p.n) -} - -// PointsTo returns the points-to set of this pointer. -func (p Pointer) PointsTo() PointsToSet { - if p.n == 0 { - return PointsToSet{} - } - return PointsToSet{p.a, &p.a.nodes[p.n].solve.pts} -} - -// MayAlias reports whether the receiver pointer may alias -// the argument pointer. -func (p Pointer) MayAlias(q Pointer) bool { - return p.PointsTo().Intersects(q.PointsTo()) -} - -// DynamicTypes returns p.PointsTo().DynamicTypes(). -func (p Pointer) DynamicTypes() *typeutil.Map { - return p.PointsTo().DynamicTypes() -} diff --git a/go/pointer/callgraph.go b/go/pointer/callgraph.go deleted file mode 100644 index 0b7aba52aa2..00000000000 --- a/go/pointer/callgraph.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pointer - -// This file defines the internal (context-sensitive) call graph. - -import ( - "fmt" - "go/token" - - "golang.org/x/tools/go/ssa" -) - -type cgnode struct { - fn *ssa.Function - obj nodeid // start of this contour's object block - sites []*callsite // ordered list of callsites within this function - callersite *callsite // where called from, if known; nil for shared contours -} - -// contour returns a description of this node's contour. -func (n *cgnode) contour() string { - if n.callersite == nil { - return "shared contour" - } - if n.callersite.instr != nil { - return fmt.Sprintf("as called from %s", n.callersite.instr.Parent()) - } - return fmt.Sprintf("as called from intrinsic (targets=n%d)", n.callersite.targets) -} - -func (n *cgnode) String() string { - return fmt.Sprintf("cg%d:%s", n.obj, n.fn) -} - -// A callsite represents a single call site within a cgnode; -// it is implicitly context-sensitive. -// callsites never represent calls to built-ins; -// they are handled as intrinsics. -type callsite struct { - targets nodeid // pts(·) contains objects for dynamically called functions - instr ssa.CallInstruction // the call instruction; nil for synthetic/intrinsic -} - -func (c *callsite) String() string { - if c.instr != nil { - return c.instr.Common().Description() - } - return "synthetic function call" -} - -// pos returns the source position of this callsite, or token.NoPos if implicit. 
-func (c *callsite) pos() token.Pos { - if c.instr != nil { - return c.instr.Pos() - } - return token.NoPos -} diff --git a/go/pointer/constraint.go b/go/pointer/constraint.go deleted file mode 100644 index 54b54288a0d..00000000000 --- a/go/pointer/constraint.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pointer - -import "go/types" - -type constraint interface { - // For a complex constraint, returns the nodeid of the pointer - // to which it is attached. For addr and copy, returns dst. - ptr() nodeid - - // renumber replaces each nodeid n in the constraint by mapping[n]. - renumber(mapping []nodeid) - - // presolve is a hook for constraint-specific behaviour during - // pre-solver optimization. Typical implementations mark as - // indirect the set of nodes to which the solver will add copy - // edges or PTS labels. - presolve(h *hvn) - - // solve is called for complex constraints when the pts for - // the node to which they are attached has changed. - solve(a *analysis, delta *nodeset) - - String() string -} - -// dst = &src -// pts(dst) ⊇ {src} -// A base constraint used to initialize the solver's pt sets -type addrConstraint struct { - dst nodeid // (ptr) - src nodeid -} - -func (c *addrConstraint) ptr() nodeid { return c.dst } -func (c *addrConstraint) renumber(mapping []nodeid) { - c.dst = mapping[c.dst] - c.src = mapping[c.src] -} - -// dst = src -// A simple constraint represented directly as a copyTo graph edge. -type copyConstraint struct { - dst nodeid // (ptr) - src nodeid -} - -func (c *copyConstraint) ptr() nodeid { return c.dst } -func (c *copyConstraint) renumber(mapping []nodeid) { - c.dst = mapping[c.dst] - c.src = mapping[c.src] -} - -// dst = src[offset] -// A complex constraint attached to src (the pointer) -type loadConstraint struct { - offset uint32 - dst nodeid - src nodeid // (ptr) -} - -func (c *loadConstraint) ptr() nodeid { return c.src } -func (c *loadConstraint) renumber(mapping []nodeid) { - c.dst = mapping[c.dst] - c.src = mapping[c.src] -} - -// dst[offset] = src -// A complex constraint attached to dst (the pointer) -type storeConstraint struct { - offset uint32 - dst nodeid // (ptr) - src nodeid -} - -func (c *storeConstraint) ptr() nodeid { return c.dst } -func (c *storeConstraint) renumber(mapping []nodeid) { - c.dst = mapping[c.dst] - c.src = mapping[c.src] -} - -// dst = &src.f or dst = &src[0] -// A complex constraint attached to dst (the pointer) -type offsetAddrConstraint struct { - offset uint32 - dst nodeid - src nodeid // (ptr) -} - -func (c *offsetAddrConstraint) ptr() nodeid { return c.src } -func (c *offsetAddrConstraint) renumber(mapping []nodeid) { - c.dst = mapping[c.dst] - c.src = mapping[c.src] -} - -// dst = src.(typ) where typ is an interface -// A complex constraint attached to src (the interface). -// No representation change: pts(dst) and pts(src) contains tagged objects. -type typeFilterConstraint struct { - typ types.Type // an interface type - dst nodeid - src nodeid // (ptr) -} - -func (c *typeFilterConstraint) ptr() nodeid { return c.src } -func (c *typeFilterConstraint) renumber(mapping []nodeid) { - c.dst = mapping[c.dst] - c.src = mapping[c.src] -} - -// dst = src.(typ) where typ is a concrete type -// A complex constraint attached to src (the interface). -// -// If exact, only tagged objects identical to typ are untagged. 
-// If !exact, tagged objects assignable to typ are untagged too. -// The latter is needed for various reflect operators, e.g. Send. -// -// This entails a representation change: -// pts(src) contains tagged objects, -// pts(dst) contains their payloads. -type untagConstraint struct { - typ types.Type // a concrete type - dst nodeid - src nodeid // (ptr) - exact bool -} - -func (c *untagConstraint) ptr() nodeid { return c.src } -func (c *untagConstraint) renumber(mapping []nodeid) { - c.dst = mapping[c.dst] - c.src = mapping[c.src] -} - -// src.method(params...) -// A complex constraint attached to iface. -type invokeConstraint struct { - method *types.Func // the abstract method - iface nodeid // (ptr) the interface - params nodeid // the start of the identity/params/results block -} - -func (c *invokeConstraint) ptr() nodeid { return c.iface } -func (c *invokeConstraint) renumber(mapping []nodeid) { - c.iface = mapping[c.iface] - c.params = mapping[c.params] -} diff --git a/go/pointer/doc.go b/go/pointer/doc.go deleted file mode 100644 index aca343b88e3..00000000000 --- a/go/pointer/doc.go +++ /dev/null @@ -1,625 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pointer implements Andersen's analysis, an inclusion-based -pointer analysis algorithm first described in (Andersen, 1994). - -A pointer analysis relates every pointer expression in a whole program -to the set of memory locations to which it might point. This -information can be used to construct a call graph of the program that -precisely represents the destinations of dynamic function and method -calls. It can also be used to determine, for example, which pairs of -channel operations operate on the same channel. - -The package allows the client to request a set of expressions of -interest for which the points-to information will be returned once the -analysis is complete. In addition, the client may request that a -callgraph is constructed. The example program in example_test.go -demonstrates both of these features. Clients should not request more -information than they need since it may increase the cost of the -analysis significantly. - -# CLASSIFICATION - -Our algorithm is INCLUSION-BASED: the points-to sets for x and y will -be related by pts(y) ⊇ pts(x) if the program contains the statement -y = x. - -It is FLOW-INSENSITIVE: it ignores all control flow constructs and the -order of statements in a program. It is therefore a "MAY ALIAS" -analysis: its facts are of the form "P may/may not point to L", -not "P must point to L". - -It is FIELD-SENSITIVE: it builds separate points-to sets for distinct -fields, such as x and y in struct { x, y *int }. - -It is mostly CONTEXT-INSENSITIVE: most functions are analyzed once, -so values can flow in at one call to the function and return out at -another. Only some smaller functions are analyzed with consideration -of their calling context. - -It has a CONTEXT-SENSITIVE HEAP: objects are named by both allocation -site and context, so the objects returned by two distinct calls to f: - - func f() *T { return new(T) } - -are distinguished up to the limits of the calling context. - -It is a WHOLE PROGRAM analysis: it requires SSA-form IR for the -complete Go program and summaries for native code. - -See the (Hind, PASTE'01) survey paper for an explanation of these terms. 
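To make the properties above concrete, the following is a purely illustrative sketch (not taken from the package or its tests); A and B name the two allocation sites:

	a := new(int) // allocation site A
	b := new(int) // allocation site B
	var p *int
	p = a // inclusion-based: pts(p) ⊇ pts(a) = {A}
	p = b // flow-insensitive: both assignments count, so pts(p) = {A, B} ("may alias")

	// Field-sensitive: distinct fields get distinct points-to sets.
	type S struct{ x, y *int }
	var s S
	s.x = a // pts(s.x) = {A}
	s.y = b // pts(s.y) = {B}
	_, _ = p, s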
- -# SOUNDNESS - -The analysis is fully sound when invoked on pure Go programs that do not -use reflection or unsafe.Pointer conversions. In other words, if there -is any possible execution of the program in which pointer P may point to -object O, the analysis will report that fact. - -# REFLECTION - -By default, the "reflect" library is ignored by the analysis, as if all -its functions were no-ops, but if the client enables the Reflection flag, -the analysis will make a reasonable attempt to model the effects of -calls into this library. However, this comes at a significant -performance cost, and not all features of that library are yet -implemented. In addition, some simplifying approximations must be made -to ensure that the analysis terminates; for example, reflection can be -used to construct an infinite set of types and values of those types, -but the analysis arbitrarily bounds the depth of such types. - -Most but not all reflection operations are supported. -In particular, addressable reflect.Values are not yet implemented, so -operations such as (reflect.Value).Set have no analytic effect. - -# UNSAFE POINTER CONVERSIONS - -The pointer analysis makes no attempt to understand aliasing between the -operand x and result y of an unsafe.Pointer conversion: - - y = (*T)(unsafe.Pointer(x)) - -It is as if the conversion allocated an entirely new object: - - y = new(T) - -# NATIVE CODE - -The analysis cannot model the aliasing effects of functions written in -languages other than Go, such as runtime intrinsics in C or assembly, or -code accessed via cgo. The result is as if such functions are no-ops. -However, various important intrinsics are understood by the analysis, -along with built-ins such as append. - -The analysis currently provides no way for users to specify the aliasing -effects of native code. - ------------------------------------------------------------------------- - -# IMPLEMENTATION - -The remaining documentation is intended for package maintainers and -pointer analysis specialists. Maintainers should have a solid -understanding of the referenced papers (especially those by H&L and PKH) -before making making significant changes. - -The implementation is similar to that described in (Pearce et al, -PASTE'04). Unlike many algorithms which interleave constraint -generation and solving, constructing the callgraph as they go, this -implementation for the most part observes a phase ordering (generation -before solving), with only simple (copy) constraints being generated -during solving. (The exception is reflection, which creates various -constraints during solving as new types flow to reflect.Value -operations.) This improves the traction of presolver optimisations, -but imposes certain restrictions, e.g. potential context sensitivity -is limited since all variants must be created a priori. - -# TERMINOLOGY - -A type is said to be "pointer-like" if it is a reference to an object. -Pointer-like types include pointers and also interfaces, maps, channels, -functions and slices. - -We occasionally use C's x->f notation to distinguish the case where x -is a struct pointer from x.f where is a struct value. - -Pointer analysis literature (and our comments) often uses the notation -dst=*src+offset to mean something different than what it means in Go. -It means: for each node index p in pts(src), the node index p+offset is -in pts(dst). Similarly *dst+offset=src is used for store constraints -and dst=src+offset for offset-address constraints. 
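As a rough illustration of this notation (the offset is invented for the example: the first field of a struct sits at logical offset 1, one past the identity node described under Structs below):

	type T struct{ f *int }
	x := &T{f: new(int)}
	y := x.f   // load:            y = *x + 1
	x.f = y    // store:           *x + 1 = y
	p := &x.f  // offset-address:  p = x + 1
	_ = p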
-
-# NODES
-
-Nodes are the key datastructure of the analysis, and have a dual role:
-they represent both constraint variables (equivalence classes of
-pointers) and members of points-to sets (things that can be pointed
-at, i.e. "labels").
-
-Nodes are naturally numbered. The numbering enables compact
-representations of sets of nodes such as bitvectors (or BDDs); and the
-ordering enables a very cheap way to group related nodes together. For
-example, passing n parameters consists of generating n parallel
-constraints from caller+i to callee+i for 0<=i<n.
-
-Interfaces:
-
-ChangeInterface is a simple copy because the representation of
-tagged objects is independent of the interface type (in contrast
-to the "method tables" approach used by the gc runtime).
-
-y := Invoke x.m(...) is implemented by allocating contiguous P/R
-blocks for the callsite and adding a dynamic rule triggered by each
-tagged object added to pts(x). The rule adds param/results copy
-edges to/from each discovered concrete method.
-
-(Q. Why do we model an interface as a pointer to a pair of type and
-value, rather than as a pair of a pointer to type and a pointer to
-value?
-A. Control-flow joins would merge interfaces ({T1}, {V1}) and ({T2},
-{V2}) to make ({T1,T2}, {V1,V2}), leading to the infeasible and
-type-unsafe combination (T1,V2). Treating the value and its concrete
-type as inseparable makes the analysis type-safe.)
-
-Type parameters:
-
-Type parameters are not directly supported by the analysis.
-Calls to generic functions will be left as if they had empty bodies.
-Users of the package are expected to use the ssa.InstantiateGenerics
-builder mode when building code that uses or depends on code
-containing generics.
-
-reflect.Value:
-
-A reflect.Value is modelled very similar to an interface{}, i.e. as
-a pointer exclusively to tagged objects, but with two generalizations.
-
-1. a reflect.Value that represents an lvalue points to an indirect
-(obj.flags ⊇ {otIndirect}) tagged object, which has a similar
-layout to a tagged object except that the value is a pointer to
-the dynamic type. Indirect tagged objects preserve the correct
-aliasing so that mutations made by (reflect.Value).Set can be
-observed.
-
-Indirect objects only arise when an lvalue is derived from an
-rvalue by indirection, e.g. the following code:
-
-	type S struct { X T }
-	var s S
-	var i interface{} = &s // i points to a *S-tagged object (from MakeInterface)
-	v1 := reflect.ValueOf(i) // v1 points to same *S-tagged object as i
-	v2 := v1.Elem() // v2 points to an indirect S-tagged object, pointing to s
-	v3 := v2.FieldByName("X") // v3 points to an indirect int-tagged object, pointing to s.X
-	v3.Set(y) // pts(s.X) ⊇ pts(y)
-
-Whether indirect or not, the concrete type of the tagged object
-corresponds to the user-visible dynamic type, and the existence
-of a pointer is an implementation detail.
-
-(NB: indirect tagged objects are not yet implemented)
-
-2. The dynamic type tag of a tagged object pointed to by a
-reflect.Value may be an interface type; it need not be concrete.
-
-This arises in code such as this:
-
-	tEface := reflect.TypeOf(new(interface{})).Elem() // interface{}
-	eface := reflect.Zero(tEface)
-
-pts(eface) is a singleton containing an interface{}-tagged
-object. That tagged object's payload is an interface{} value,
-i.e. the pts of the payload contains only concrete-tagged
-objects, although in this example it's the zero interface{} value,
-so its pts is empty.
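As an informal sketch tying the interface modelling above to the constraints defined in constraint.go (the ⟨type, value⟩ notation is shorthand for a tagged object, not the package's own syntax):

	type S struct{ x int }
	s := &S{}
	var i interface{} = s // MakeInterface: pts(i) gains a tagged object ⟨*S, v⟩ whose payload v points where s points
	p, ok := i.(*S)       // untag: for each tagged object in pts(i) whose tag matches *S, pts(p) ⊇ pts(payload)
	_, _ = p, ok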
- -reflect.Type: - -Just as in the real "reflect" library, we represent a reflect.Type -as an interface whose sole implementation is the concrete type, -*reflect.rtype. (This choice is forced on us by go/types: clients -cannot fabricate types with arbitrary method sets.) - -rtype instances are canonical: there is at most one per dynamic -type. (rtypes are in fact large structs but since identity is all -that matters, we represent them by a single node.) - -The payload of each *rtype-tagged object is an *rtype pointer that -points to exactly one such canonical rtype object. We exploit this -by setting the node.typ of the payload to the dynamic type, not -'*rtype'. This saves us an indirection in each resolution rule. As -an optimisation, *rtype-tagged objects are canonicalized too. - -Aggregate types: - -Aggregate types are treated as if all directly contained -aggregates are recursively flattened out. - -Structs: - -*ssa.Field y = x.f creates a simple edge to y from x's node at f's offset. - -*ssa.FieldAddr y = &x->f requires a dynamic closure rule to create - - simple edges for each struct discovered in pts(x). - -The nodes of a struct consist of a special 'identity' node (whose -type is that of the struct itself), followed by the nodes for all -the struct's fields, recursively flattened out. A pointer to the -struct is a pointer to its identity node. That node allows us to -distinguish a pointer to a struct from a pointer to its first field. - -Field offsets are logical field offsets (plus one for the identity -node), so the sizes of the fields can be ignored by the analysis. - -(The identity node is non-traditional but enables the distinction -described above, which is valuable for code comprehension tools. -Typical pointer analyses for C, whose purpose is compiler -optimization, must soundly model unsafe.Pointer (void*) conversions, -and this requires fidelity to the actual memory layout using physical -field offsets.) - -*ssa.Field y = x.f creates a simple edge to y from x's node at f's offset. - -*ssa.FieldAddr y = &x->f requires a dynamic closure rule to create - - simple edges for each struct discovered in pts(x). - -Arrays: - -We model an array by an identity node (whose type is that of the -array itself) followed by a node representing all the elements of -the array; the analysis does not distinguish elements with different -indices. Effectively, an array is treated like struct{elem T}, a -load y=x[i] like y=x.elem, and a store x[i]=y like x.elem=y; the -index i is ignored. - -A pointer to an array is pointer to its identity node. (A slice is -also a pointer to an array's identity node.) The identity node -allows us to distinguish a pointer to an array from a pointer to one -of its elements, but it is rather costly because it introduces more -offset constraints into the system. Furthermore, sound treatment of -unsafe.Pointer would require us to dispense with this node. - -Arrays may be allocated by Alloc, by make([]T), by calls to append, -and via reflection. - -Tuples (T, ...): - -Tuples are treated like structs with naturally numbered fields. -*ssa.Extract is analogous to *ssa.Field. - -However, tuples have no identity field since by construction, they -cannot be address-taken. - -# FUNCTION CALLS - -There are three kinds of function call: - 1. static "call"-mode calls of functions. - 2. dynamic "call"-mode calls of functions. - 3. dynamic "invoke"-mode calls of interface methods. - -Cases 1 and 2 apply equally to methods and standalone functions. 
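A minimal, self-contained sketch of the three kinds of call (illustrative only; the comments say where the analysis finds the callees):

	package main

	import (
		"io"
		"strings"
	)

	func h(r io.Reader) {}

	func main() {
		g := h                                   // function value: pts(g) ⊇ {h}
		var r io.Reader = strings.NewReader("x")

		h(r)        // 1. static "call"-mode call of a function
		g(r)        // 2. dynamic "call"-mode call: callees drawn from pts(g)
		r.Read(nil) // 3. dynamic "invoke"-mode call: targets found via the tagged objects in pts(r)
	}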
- -Static calls: - -A static call consists three steps: - - finding the function object of the callee; - - creating copy edges from the actual parameter value nodes to the - P-block in the function object (this includes the receiver if - the callee is a method); - - creating copy edges from the R-block in the function object to - the value nodes for the result of the call. - -A static function call is little more than two struct value copies -between the P/R blocks of caller and callee: - - callee.P = caller.P - caller.R = callee.R - -Context sensitivity: Static calls (alone) may be treated context sensitively, -i.e. each callsite may cause a distinct re-analysis of the -callee, improving precision. Our current context-sensitivity -policy treats all intrinsics and getter/setter methods in this -manner since such functions are small and seem like an obvious -source of spurious confluences, though this has not yet been -evaluated. - -Dynamic function calls: - -Dynamic calls work in a similar manner except that the creation of -copy edges occurs dynamically, in a similar fashion to a pair of -struct copies in which the callee is indirect: - - callee->P = caller.P - caller.R = callee->R - -(Recall that the function object's P- and R-blocks are contiguous.) - -Interface method invocation: - -For invoke-mode calls, we create a params/results block for the -callsite and attach a dynamic closure rule to the interface. For -each new tagged object that flows to the interface, we look up -the concrete method, find its function object, and connect its P/R -blocks to the callsite's P/R blocks, adding copy edges to the graph -during solving. - -Recording call targets: - -The analysis notifies its clients of each callsite it encounters, -passing a CallSite interface. Among other things, the CallSite -contains a synthetic constraint variable ("targets") whose -points-to solution includes the set of all function objects to -which the call may dispatch. - -It is via this mechanism that the callgraph is made available. -Clients may also elect to be notified of callgraph edges directly; -internally this just iterates all "targets" variables' pts(·)s. - -# PRESOLVER - -We implement Hash-Value Numbering (HVN), a pre-solver constraint -optimization described in Hardekopf & Lin, SAS'07. This is documented -in more detail in hvn.go. We intend to add its cousins HR and HU in -future. - -# SOLVER - -The solver is currently a naive Andersen-style implementation; it does -not perform online cycle detection, though we plan to add solver -optimisations such as Hybrid- and Lazy- Cycle Detection from (Hardekopf -& Lin, PLDI'07). - -It uses difference propagation (Pearce et al, SQC'04) to avoid -redundant re-triggering of closure rules for values already seen. - -Points-to sets are represented using sparse bit vectors (similar to -those used in LLVM and gcc), which are more space- and time-efficient -than sets based on Go's built-in map type or dense bit vectors. - -Nodes are permuted prior to solving so that object nodes (which may -appear in points-to sets) are lower numbered than non-object (var) -nodes. This improves the density of the set over which the PTSs -range, and thus the efficiency of the representation. - -Partly thanks to avoiding map iteration, the execution of the solver is -100% deterministic, a great help during debugging. - -# FURTHER READING - -Andersen, L. O. 1994. Program analysis and specialization for the C -programming language. Ph.D. dissertation. DIKU, University of -Copenhagen. - -David J. 
Pearce, Paul H. J. Kelly, and Chris Hankin. 2004. Efficient -field-sensitive pointer analysis for C. In Proceedings of the 5th ACM -SIGPLAN-SIGSOFT workshop on Program analysis for software tools and -engineering (PASTE '04). ACM, New York, NY, USA, 37-42. -http://doi.acm.org/10.1145/996821.996835 - -David J. Pearce, Paul H. J. Kelly, and Chris Hankin. 2004. Online -Cycle Detection and Difference Propagation: Applications to Pointer -Analysis. Software Quality Control 12, 4 (December 2004), 311-337. -http://dx.doi.org/10.1023/B:SQJO.0000039791.93071.a2 - -David Grove and Craig Chambers. 2001. A framework for call graph -construction algorithms. ACM Trans. Program. Lang. Syst. 23, 6 -(November 2001), 685-746. -http://doi.acm.org/10.1145/506315.506316 - -Ben Hardekopf and Calvin Lin. 2007. The ant and the grasshopper: fast -and accurate pointer analysis for millions of lines of code. In -Proceedings of the 2007 ACM SIGPLAN conference on Programming language -design and implementation (PLDI '07). ACM, New York, NY, USA, 290-299. -http://doi.acm.org/10.1145/1250734.1250767 - -Ben Hardekopf and Calvin Lin. 2007. Exploiting pointer and location -equivalence to optimize pointer analysis. In Proceedings of the 14th -international conference on Static Analysis (SAS'07), Hanne Riis -Nielson and Gilberto Filé (Eds.). Springer-Verlag, Berlin, Heidelberg, -265-280. - -Atanas Rountev and Satish Chandra. 2000. Off-line variable substitution -for scaling points-to analysis. In Proceedings of the ACM SIGPLAN 2000 -conference on Programming language design and implementation (PLDI '00). -ACM, New York, NY, USA, 47-56. DOI=10.1145/349299.349310 -http://doi.acm.org/10.1145/349299.349310 -*/ -package pointer // import "golang.org/x/tools/go/pointer" diff --git a/go/pointer/example_test.go b/go/pointer/example_test.go deleted file mode 100644 index 00017df6e77..00000000000 --- a/go/pointer/example_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pointer_test - -import ( - "fmt" - "sort" - - "golang.org/x/tools/go/callgraph" - "golang.org/x/tools/go/loader" - "golang.org/x/tools/go/pointer" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/ssa/ssautil" -) - -// This program demonstrates how to use the pointer analysis to -// obtain a conservative call-graph of a Go program. -// It also shows how to compute the points-to set of a variable, -// in this case, (C).f's ch parameter. -func Example() { - const myprog = ` -package main - -import "fmt" - -type I interface { - f(map[string]int) -} - -type C struct{} - -func (C) f(m map[string]int) { - fmt.Println("C.f()") -} - -func main() { - var i I = C{} - x := map[string]int{"one":1} - i.f(x) // dynamic method call -} -` - var conf loader.Config - - // Parse the input file, a string. - // (Command-line tools should use conf.FromArgs.) - file, err := conf.ParseFile("myprog.go", myprog) - if err != nil { - fmt.Print(err) // parse error - return - } - - // Create single-file main package and import its dependencies. - conf.CreateFromFiles("main", file) - - iprog, err := conf.Load() - if err != nil { - fmt.Print(err) // type error in some package - return - } - - // Create SSA-form program representation. - prog := ssautil.CreateProgram(iprog, ssa.InstantiateGenerics) - mainPkg := prog.Package(iprog.Created[0].Pkg) - - // Build SSA code for bodies of all functions in the whole program. 
- prog.Build() - - // Configure the pointer analysis to build a call-graph. - config := &pointer.Config{ - Mains: []*ssa.Package{mainPkg}, - BuildCallGraph: true, - } - - // Query points-to set of (C).f's parameter m, a map. - C := mainPkg.Type("C").Type() - Cfm := prog.LookupMethod(C, mainPkg.Pkg, "f").Params[1] - config.AddQuery(Cfm) - - // Run the pointer analysis. - result, err := pointer.Analyze(config) - if err != nil { - panic(err) // internal error in pointer analysis - } - - // Find edges originating from the main package. - // By converting to strings, we de-duplicate nodes - // representing the same function due to context sensitivity. - var edges []string - callgraph.GraphVisitEdges(result.CallGraph, func(edge *callgraph.Edge) error { - caller := edge.Caller.Func - if caller.Pkg == mainPkg { - edges = append(edges, fmt.Sprint(caller, " --> ", edge.Callee.Func)) - } - return nil - }) - - // Print the edges in sorted order. - sort.Strings(edges) - for _, edge := range edges { - fmt.Println(edge) - } - fmt.Println() - - // Print the labels of (C).f(m)'s points-to set. - fmt.Println("m may point to:") - var labels []string - for _, l := range result.Queries[Cfm].PointsTo().Labels() { - label := fmt.Sprintf(" %s: %s", prog.Fset.Position(l.Pos()), l) - labels = append(labels, label) - } - sort.Strings(labels) - for _, label := range labels { - fmt.Println(label) - } - - // Output: - // (main.C).f --> fmt.Println - // main.init --> fmt.init - // main.main --> (main.C).f - // - // m may point to: - // myprog.go:18:21: makemap -} diff --git a/go/pointer/gen.go b/go/pointer/gen.go deleted file mode 100644 index 5e527f21ab2..00000000000 --- a/go/pointer/gen.go +++ /dev/null @@ -1,1364 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pointer - -// This file defines the constraint generation phase. - -// TODO(adonovan): move the constraint definitions and the store() etc -// functions which add them (and are also used by the solver) into a -// new file, constraints.go. - -import ( - "fmt" - "go/token" - "go/types" - "strings" - - "golang.org/x/tools/go/callgraph" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/internal/typeparams" -) - -var ( - tEface = types.NewInterfaceType(nil, nil).Complete() - tInvalid = types.Typ[types.Invalid] - tUnsafePtr = types.Typ[types.UnsafePointer] -) - -// ---------- Node creation ---------- - -// nextNode returns the index of the next unused node. -func (a *analysis) nextNode() nodeid { - return nodeid(len(a.nodes)) -} - -// addNodes creates nodes for all scalar elements in type typ, and -// returns the id of the first one, or zero if the type was -// analytically uninteresting. -// -// comment explains the origin of the nodes, as a debugging aid. -func (a *analysis) addNodes(typ types.Type, comment string) nodeid { - id := a.nextNode() - for _, fi := range a.flatten(typ) { - a.addOneNode(fi.typ, comment, fi) - } - if id == a.nextNode() { - return 0 // type contained no pointers - } - return id -} - -// addOneNode creates a single node with type typ, and returns its id. -// -// typ should generally be scalar (except for tagged.T nodes -// and struct/array identity nodes). Use addNodes for non-scalar types. -// -// comment explains the origin of the nodes, as a debugging aid. -// subelement indicates the subelement, e.g. ".a.b[*].c". 
-func (a *analysis) addOneNode(typ types.Type, comment string, subelement *fieldInfo) nodeid { - id := a.nextNode() - a.nodes = append(a.nodes, &node{typ: typ, subelement: subelement, solve: new(solverState)}) - if a.log != nil { - fmt.Fprintf(a.log, "\tcreate n%d %s for %s%s\n", - id, typ, comment, subelement.path()) - } - return id -} - -// setValueNode associates node id with the value v. -// cgn identifies the context iff v is a local variable. -func (a *analysis) setValueNode(v ssa.Value, id nodeid, cgn *cgnode) { - if cgn != nil { - a.localval[v] = id - } else { - a.globalval[v] = id - } - if a.log != nil { - fmt.Fprintf(a.log, "\tval[%s] = n%d (%T)\n", v.Name(), id, v) - } - - // Due to context-sensitivity, we may encounter the same Value - // in many contexts. We merge them to a canonical node, since - // that's what all clients want. - - // Record the (v, id) relation if the client has queried pts(v). - if _, ok := a.config.Queries[v]; ok { - t := v.Type() - ptr, ok := a.result.Queries[v] - if !ok { - // First time? Create the canonical query node. - ptr = Pointer{a, a.addNodes(t, "query")} - a.result.Queries[v] = ptr - } - a.result.Queries[v] = ptr - a.copy(ptr.n, id, a.sizeof(t)) - } - - // Record the (*v, id) relation if the client has queried pts(*v). - if _, ok := a.config.IndirectQueries[v]; ok { - t := v.Type() - ptr, ok := a.result.IndirectQueries[v] - if !ok { - // First time? Create the canonical indirect query node. - ptr = Pointer{a, a.addNodes(v.Type(), "query.indirect")} - a.result.IndirectQueries[v] = ptr - } - a.genLoad(cgn, ptr.n, v, 0, a.sizeof(t)) - } - - for _, query := range a.config.extendedQueries[v] { - t, nid := a.evalExtendedQuery(v.Type().Underlying(), id, query.ops) - - if query.ptr.a == nil { - query.ptr.a = a - query.ptr.n = a.addNodes(t, "query.extended") - } - a.copy(query.ptr.n, nid, a.sizeof(t)) - } -} - -// endObject marks the end of a sequence of calls to addNodes denoting -// a single object allocation. -// -// obj is the start node of the object, from a prior call to nextNode. -// Its size, flags and optional data will be updated. -func (a *analysis) endObject(obj nodeid, cgn *cgnode, data interface{}) *object { - // Ensure object is non-empty by padding; - // the pad will be the object node. - size := uint32(a.nextNode() - obj) - if size == 0 { - a.addOneNode(tInvalid, "padding", nil) - } - objNode := a.nodes[obj] - o := &object{ - size: size, // excludes padding - cgn: cgn, - data: data, - } - objNode.obj = o - - return o -} - -// makeFunctionObject creates and returns a new function object -// (contour) for fn, and returns the id of its first node. It also -// enqueues fn for subsequent constraint generation. -// -// For a context-sensitive contour, callersite identifies the sole -// callsite; for shared contours, caller is nil. -func (a *analysis) makeFunctionObject(fn *ssa.Function, callersite *callsite) nodeid { - if a.log != nil { - fmt.Fprintf(a.log, "\t---- makeFunctionObject %s\n", fn) - } - - // obj is the function object (identity, params, results). - obj := a.nextNode() - cgn := a.makeCGNode(fn, obj, callersite) - sig := fn.Signature - a.addOneNode(sig, "func.cgnode", nil) // (scalar with Signature type) - if recv := sig.Recv(); recv != nil { - a.addNodes(recv.Type(), "func.recv") - } - a.addNodes(sig.Params(), "func.params") - a.addNodes(sig.Results(), "func.results") - a.endObject(obj, cgn, fn).flags |= otFunction - - if a.log != nil { - fmt.Fprintf(a.log, "\t----\n") - } - - // Queue it up for constraint processing. 
- a.genq = append(a.genq, cgn) - - return obj -} - -// makeTagged creates a tagged object of type typ. -func (a *analysis) makeTagged(typ types.Type, cgn *cgnode, data interface{}) nodeid { - obj := a.addOneNode(typ, "tagged.T", nil) // NB: type may be non-scalar! - a.addNodes(typ, "tagged.v") - a.endObject(obj, cgn, data).flags |= otTagged - return obj -} - -// makeRtype returns the canonical tagged object of type *rtype whose -// payload points to the sole rtype object for T. -// -// TODO(adonovan): move to reflect.go; it's part of the solver really. -func (a *analysis) makeRtype(T types.Type) nodeid { - if v := a.rtypes.At(T); v != nil { - return v.(nodeid) - } - - // Create the object for the reflect.rtype itself, which is - // ordinarily a large struct but here a single node will do. - obj := a.nextNode() - a.addOneNode(T, "reflect.rtype", nil) - a.endObject(obj, nil, T) - - id := a.makeTagged(a.reflectRtypePtr, nil, T) - a.nodes[id+1].typ = T // trick (each *rtype tagged object is a singleton) - a.addressOf(a.reflectRtypePtr, id+1, obj) - - a.rtypes.Set(T, id) - return id -} - -// rtypeTaggedValue returns the type of the *reflect.rtype-tagged object obj. -func (a *analysis) rtypeTaggedValue(obj nodeid) types.Type { - tDyn, t, _ := a.taggedValue(obj) - if tDyn != a.reflectRtypePtr { - panic(fmt.Sprintf("not a *reflect.rtype-tagged object: obj=n%d tag=%v payload=n%d", obj, tDyn, t)) - } - return a.nodes[t].typ -} - -// valueNode returns the id of the value node for v, creating it (and -// the association) as needed. It may return zero for uninteresting -// values containing no pointers. -func (a *analysis) valueNode(v ssa.Value) nodeid { - // Value nodes for locals are created en masse by genFunc. - if id, ok := a.localval[v]; ok { - return id - } - - // Value nodes for globals are created on demand. - id, ok := a.globalval[v] - if !ok { - var comment string - if a.log != nil { - comment = v.String() - } - id = a.addNodes(v.Type(), comment) - if obj := a.objectNode(nil, v); obj != 0 { - a.addressOf(v.Type(), id, obj) - } - a.setValueNode(v, id, nil) - } - return id -} - -// valueOffsetNode ascertains the node for tuple/struct value v, -// then returns the node for its subfield #index. -func (a *analysis) valueOffsetNode(v ssa.Value, index int) nodeid { - id := a.valueNode(v) - if id == 0 { - panic(fmt.Sprintf("cannot offset within n0: %s = %s", v.Name(), v)) - } - return id + nodeid(a.offsetOf(v.Type(), index)) -} - -// isTaggedObject reports whether object obj is a tagged object. -func (a *analysis) isTaggedObject(obj nodeid) bool { - return a.nodes[obj].obj.flags&otTagged != 0 -} - -// taggedValue returns the dynamic type tag, the (first node of the) -// payload, and the indirect flag of the tagged object starting at id. -// Panic ensues if !isTaggedObject(id). -func (a *analysis) taggedValue(obj nodeid) (tDyn types.Type, v nodeid, indirect bool) { - n := a.nodes[obj] - flags := n.obj.flags - if flags&otTagged == 0 { - panic(fmt.Sprintf("not a tagged object: n%d", obj)) - } - return n.typ, obj + 1, flags&otIndirect != 0 -} - -// funcParams returns the first node of the params (P) block of the -// function whose object node (obj.flags&otFunction) is id. 
-func (a *analysis) funcParams(id nodeid) nodeid { - n := a.nodes[id] - if n.obj == nil || n.obj.flags&otFunction == 0 { - panic(fmt.Sprintf("funcParams(n%d): not a function object block", id)) - } - return id + 1 -} - -// funcResults returns the first node of the results (R) block of the -// function whose object node (obj.flags&otFunction) is id. -func (a *analysis) funcResults(id nodeid) nodeid { - n := a.nodes[id] - if n.obj == nil || n.obj.flags&otFunction == 0 { - panic(fmt.Sprintf("funcResults(n%d): not a function object block", id)) - } - sig := n.typ.(*types.Signature) - id += 1 + nodeid(a.sizeof(sig.Params())) - if sig.Recv() != nil { - id += nodeid(a.sizeof(sig.Recv().Type())) - } - return id -} - -// ---------- Constraint creation ---------- - -// copy creates a constraint of the form dst = src. -// sizeof is the width (in logical fields) of the copied type. -func (a *analysis) copy(dst, src nodeid, sizeof uint32) { - if src == dst || sizeof == 0 { - return // trivial - } - if src == 0 || dst == 0 { - panic(fmt.Sprintf("ill-typed copy dst=n%d src=n%d", dst, src)) - } - for i := uint32(0); i < sizeof; i++ { - a.addConstraint(©Constraint{dst, src}) - src++ - dst++ - } -} - -// addressOf creates a constraint of the form id = &obj. -// T is the type of the address. -func (a *analysis) addressOf(T types.Type, id, obj nodeid) { - if id == 0 { - panic("addressOf: zero id") - } - if obj == 0 { - panic("addressOf: zero obj") - } - if a.shouldTrack(T) { - a.addConstraint(&addrConstraint{id, obj}) - } -} - -// load creates a load constraint of the form dst = src[offset]. -// offset is the pointer offset in logical fields. -// sizeof is the width (in logical fields) of the loaded type. -func (a *analysis) load(dst, src nodeid, offset, sizeof uint32) { - if dst == 0 { - return // load of non-pointerlike value - } - if src == 0 && dst == 0 { - return // non-pointerlike operation - } - if src == 0 || dst == 0 { - panic(fmt.Sprintf("ill-typed load dst=n%d src=n%d", dst, src)) - } - for i := uint32(0); i < sizeof; i++ { - a.addConstraint(&loadConstraint{offset, dst, src}) - offset++ - dst++ - } -} - -// store creates a store constraint of the form dst[offset] = src. -// offset is the pointer offset in logical fields. -// sizeof is the width (in logical fields) of the stored type. -func (a *analysis) store(dst, src nodeid, offset uint32, sizeof uint32) { - if src == 0 { - return // store of non-pointerlike value - } - if src == 0 && dst == 0 { - return // non-pointerlike operation - } - if src == 0 || dst == 0 { - panic(fmt.Sprintf("ill-typed store dst=n%d src=n%d", dst, src)) - } - for i := uint32(0); i < sizeof; i++ { - a.addConstraint(&storeConstraint{offset, dst, src}) - offset++ - src++ - } -} - -// offsetAddr creates an offsetAddr constraint of the form dst = &src.#offset. -// offset is the field offset in logical fields. -// T is the type of the address. -func (a *analysis) offsetAddr(T types.Type, dst, src nodeid, offset uint32) { - if !a.shouldTrack(T) { - return - } - if offset == 0 { - // Simplify dst = &src->f0 - // to dst = src - // (NB: this optimisation is defeated by the identity - // field prepended to struct and array objects.) - a.copy(dst, src, 1) - } else { - a.addConstraint(&offsetAddrConstraint{offset, dst, src}) - } -} - -// typeAssert creates a typeFilter or untag constraint of the form dst = src.(T): -// typeFilter for an interface, untag for a concrete type. -// The exact flag is specified as for untagConstraint. 
-func (a *analysis) typeAssert(T types.Type, dst, src nodeid, exact bool) { - if isInterface(T) { - a.addConstraint(&typeFilterConstraint{T, dst, src}) - } else { - a.addConstraint(&untagConstraint{T, dst, src, exact}) - } -} - -// addConstraint adds c to the constraint set. -func (a *analysis) addConstraint(c constraint) { - a.constraints = append(a.constraints, c) - if a.log != nil { - fmt.Fprintf(a.log, "\t%s\n", c) - } -} - -// copyElems generates load/store constraints for *dst = *src, -// where src and dst are slices or *arrays. -func (a *analysis) copyElems(cgn *cgnode, typ types.Type, dst, src ssa.Value) { - tmp := a.addNodes(typ, "copy") - sz := a.sizeof(typ) - a.genLoad(cgn, tmp, src, 1, sz) - a.genStore(cgn, dst, tmp, 1, sz) -} - -// ---------- Constraint generation ---------- - -// genConv generates constraints for the conversion operation conv. -func (a *analysis) genConv(conv *ssa.Convert, cgn *cgnode) { - res := a.valueNode(conv) - if res == 0 { - return // result is non-pointerlike - } - - tSrc := conv.X.Type() - tDst := conv.Type() - - switch utSrc := tSrc.Underlying().(type) { - case *types.Slice: - // []byte/[]rune -> string? - return - - case *types.Pointer: - // *T -> unsafe.Pointer? - if tDst.Underlying() == tUnsafePtr { - return // we don't model unsafe aliasing (unsound) - } - - case *types.Basic: - switch tDst.Underlying().(type) { - case *types.Pointer: - // Treat unsafe.Pointer->*T conversions like - // new(T) and create an unaliased object. - if utSrc == tUnsafePtr { - obj := a.addNodes(mustDeref(tDst), "unsafe.Pointer conversion") - a.endObject(obj, cgn, conv) - a.addressOf(tDst, res, obj) - return - } - - case *types.Slice: - // string -> []byte/[]rune (or named aliases)? - if utSrc.Info()&types.IsString != 0 { - obj := a.addNodes(sliceToArray(tDst), "convert") - a.endObject(obj, cgn, conv) - a.addressOf(tDst, res, obj) - return - } - - case *types.Basic: - // All basic-to-basic type conversions are no-ops. - // This includes uintptr<->unsafe.Pointer conversions, - // which we (unsoundly) ignore. - return - } - } - - panic(fmt.Sprintf("illegal *ssa.Convert %s -> %s: %s", tSrc, tDst, conv.Parent())) -} - -// genAppend generates constraints for a call to append. -func (a *analysis) genAppend(instr *ssa.Call, cgn *cgnode) { - // Consider z = append(x, y). y is optional. - // This may allocate a new [1]T array; call its object w. - // We get the following constraints: - // z = x - // z = &w - // *z = *y - - x := instr.Call.Args[0] - - z := instr - a.copy(a.valueNode(z), a.valueNode(x), 1) // z = x - - if len(instr.Call.Args) == 1 { - return // no allocation for z = append(x) or _ = append(x). - } - - // TODO(adonovan): test append([]byte, ...string) []byte. - - y := instr.Call.Args[1] - tArray := sliceToArray(instr.Call.Args[0].Type()) - - w := a.nextNode() - a.addNodes(tArray, "append") - a.endObject(w, cgn, instr) - - a.copyElems(cgn, tArray.Elem(), z, y) // *z = *y - a.addressOf(instr.Type(), a.valueNode(z), w) // z = &w -} - -// genBuiltinCall generates constraints for a call to a built-in. -func (a *analysis) genBuiltinCall(instr ssa.CallInstruction, cgn *cgnode) { - call := instr.Common() - switch call.Value.(*ssa.Builtin).Name() { - case "append": - // Safe cast: append cannot appear in a go or defer statement. 
- a.genAppend(instr.(*ssa.Call), cgn) - - case "copy": - tElem := call.Args[0].Type().Underlying().(*types.Slice).Elem() - a.copyElems(cgn, tElem, call.Args[0], call.Args[1]) - - case "panic": - a.copy(a.panicNode, a.valueNode(call.Args[0]), 1) - - case "recover": - if v := instr.Value(); v != nil { - a.copy(a.valueNode(v), a.panicNode, 1) - } - - case "print": - // In the tests, the probe might be the sole reference - // to its arg, so make sure we create nodes for it. - if len(call.Args) > 0 { - a.valueNode(call.Args[0]) - } - - case "ssa:wrapnilchk": - a.copy(a.valueNode(instr.Value()), a.valueNode(call.Args[0]), 1) - - default: - // No-ops: close len cap real imag complex print println delete. - } -} - -// shouldUseContext defines the context-sensitivity policy. It -// returns true if we should analyse all static calls to fn anew. -// -// Obviously this interface rather limits how much freedom we have to -// choose a policy. The current policy, rather arbitrarily, is true -// for intrinsics and accessor methods (actually: short, single-block, -// call-free functions). This is just a starting point. -func (a *analysis) shouldUseContext(fn *ssa.Function) bool { - if a.findIntrinsic(fn) != nil { - return true // treat intrinsics context-sensitively - } - if len(fn.Blocks) != 1 { - return false // too expensive - } - blk := fn.Blocks[0] - if len(blk.Instrs) > 10 { - return false // too expensive - } - if fn.Synthetic != "" && (fn.Pkg == nil || fn != fn.Pkg.Func("init")) { - return true // treat synthetic wrappers context-sensitively - } - for _, instr := range blk.Instrs { - switch instr := instr.(type) { - case ssa.CallInstruction: - // Disallow function calls (except to built-ins) - // because of the danger of unbounded recursion. - if _, ok := instr.Common().Value.(*ssa.Builtin); !ok { - return false - } - } - } - return true -} - -// genStaticCall generates constraints for a statically dispatched function call. -func (a *analysis) genStaticCall(caller *cgnode, site *callsite, call *ssa.CallCommon, result nodeid) { - fn := call.StaticCallee() - - // Special cases for inlined intrinsics. - switch fn { - case a.runtimeSetFinalizer: - // Inline SetFinalizer so the call appears direct. - site.targets = a.addOneNode(tInvalid, "SetFinalizer.targets", nil) - a.addConstraint(&runtimeSetFinalizerConstraint{ - targets: site.targets, - x: a.valueNode(call.Args[0]), - f: a.valueNode(call.Args[1]), - }) - return - - case a.reflectValueCall: - // Inline (reflect.Value).Call so the call appears direct. - dotdotdot := false - ret := reflectCallImpl(a, caller, site, a.valueNode(call.Args[0]), a.valueNode(call.Args[1]), dotdotdot) - if result != 0 { - a.addressOf(fn.Signature.Results().At(0).Type(), result, ret) - } - return - } - - // Ascertain the context (contour/cgnode) for a particular call. - var obj nodeid - if a.shouldUseContext(fn) { - obj = a.makeFunctionObject(fn, site) // new contour - } else { - obj = a.objectNode(nil, fn) // shared contour - } - a.callEdge(caller, site, obj) - - sig := call.Signature() - - // Copy receiver, if any. - params := a.funcParams(obj) - args := call.Args - if sig.Recv() != nil { - sz := a.sizeof(sig.Recv().Type()) - a.copy(params, a.valueNode(args[0]), sz) - params += nodeid(sz) - args = args[1:] - } - - // Copy actual parameters into formal params block. - // Must loop, since the actuals aren't contiguous. 
- for i, arg := range args { - sz := a.sizeof(sig.Params().At(i).Type()) - a.copy(params, a.valueNode(arg), sz) - params += nodeid(sz) - } - - // Copy formal results block to actual result. - if result != 0 { - a.copy(result, a.funcResults(obj), a.sizeof(sig.Results())) - } -} - -// genDynamicCall generates constraints for a dynamic function call. -func (a *analysis) genDynamicCall(caller *cgnode, site *callsite, call *ssa.CallCommon, result nodeid) { - // pts(targets) will be the set of possible call targets. - site.targets = a.valueNode(call.Value) - - // We add dynamic closure rules that store the arguments into - // the P-block and load the results from the R-block of each - // function discovered in pts(targets). - - sig := call.Signature() - var offset uint32 = 1 // P/R block starts at offset 1 - for i, arg := range call.Args { - sz := a.sizeof(sig.Params().At(i).Type()) - a.genStore(caller, call.Value, a.valueNode(arg), offset, sz) - offset += sz - } - if result != 0 { - a.genLoad(caller, result, call.Value, offset, a.sizeof(sig.Results())) - } -} - -// genInvoke generates constraints for a dynamic method invocation. -func (a *analysis) genInvoke(caller *cgnode, site *callsite, call *ssa.CallCommon, result nodeid) { - if call.Value.Type() == a.reflectType { - a.genInvokeReflectType(caller, site, call, result) - return - } - - sig := call.Signature() - - // Allocate a contiguous targets/params/results block for this call. - block := a.nextNode() - // pts(targets) will be the set of possible call targets - site.targets = a.addOneNode(sig, "invoke.targets", nil) - p := a.addNodes(sig.Params(), "invoke.params") - r := a.addNodes(sig.Results(), "invoke.results") - - // Copy the actual parameters into the call's params block. - for i, n := 0, sig.Params().Len(); i < n; i++ { - sz := a.sizeof(sig.Params().At(i).Type()) - a.copy(p, a.valueNode(call.Args[i]), sz) - p += nodeid(sz) - } - // Copy the call's results block to the actual results. - if result != 0 { - a.copy(result, r, a.sizeof(sig.Results())) - } - - // We add a dynamic invoke constraint that will connect the - // caller's and the callee's P/R blocks for each discovered - // call target. - a.addConstraint(&invokeConstraint{call.Method, a.valueNode(call.Value), block}) -} - -// genInvokeReflectType is a specialization of genInvoke where the -// receiver type is a reflect.Type, under the assumption that there -// can be at most one implementation of this interface, *reflect.rtype. -// -// (Though this may appear to be an instance of a pattern---method -// calls on interfaces known to have exactly one implementation---in -// practice it occurs rarely, so we special case for reflect.Type.) -// -// In effect we treat this: -// -// var rt reflect.Type = ... -// rt.F() -// -// as this: -// -// rt.(*reflect.rtype).F() -func (a *analysis) genInvokeReflectType(caller *cgnode, site *callsite, call *ssa.CallCommon, result nodeid) { - // Unpack receiver into rtype - rtype := a.addOneNode(a.reflectRtypePtr, "rtype.recv", nil) - recv := a.valueNode(call.Value) - a.typeAssert(a.reflectRtypePtr, rtype, recv, true) - - // Look up the concrete method. - fn := a.prog.LookupMethod(a.reflectRtypePtr, call.Method.Pkg(), call.Method.Name()) - - obj := a.makeFunctionObject(fn, site) // new contour for this call - a.callEdge(caller, site, obj) - - // From now on, it's essentially a static call, but little is - // gained by factoring together the code for both cases. 
- - sig := fn.Signature // concrete method - targets := a.addOneNode(sig, "call.targets", nil) - a.addressOf(sig, targets, obj) // (a singleton) - - // Copy receiver. - params := a.funcParams(obj) - a.copy(params, rtype, 1) - params++ - - // Copy actual parameters into formal P-block. - // Must loop, since the actuals aren't contiguous. - for i, arg := range call.Args { - sz := a.sizeof(sig.Params().At(i).Type()) - a.copy(params, a.valueNode(arg), sz) - params += nodeid(sz) - } - - // Copy formal R-block to actual R-block. - if result != 0 { - a.copy(result, a.funcResults(obj), a.sizeof(sig.Results())) - } -} - -// genCall generates constraints for call instruction instr. -func (a *analysis) genCall(caller *cgnode, instr ssa.CallInstruction) { - call := instr.Common() - - // Intrinsic implementations of built-in functions. - if _, ok := call.Value.(*ssa.Builtin); ok { - a.genBuiltinCall(instr, caller) - return - } - - var result nodeid - if v := instr.Value(); v != nil { - result = a.valueNode(v) - } - - site := &callsite{instr: instr} - if call.StaticCallee() != nil { - a.genStaticCall(caller, site, call, result) - } else if call.IsInvoke() { - a.genInvoke(caller, site, call, result) - } else { - a.genDynamicCall(caller, site, call, result) - } - - caller.sites = append(caller.sites, site) - - if a.log != nil { - // TODO(adonovan): debug: improve log message. - fmt.Fprintf(a.log, "\t%s to targets %s from %s\n", site, site.targets, caller) - } -} - -// objectNode returns the object to which v points, if known. -// In other words, if the points-to set of v is a singleton, it -// returns the sole label, zero otherwise. -// -// We exploit this information to make the generated constraints less -// dynamic. For example, a complex load constraint can be replaced by -// a simple copy constraint when the sole destination is known a priori. -// -// Some SSA instructions always have singletons points-to sets: -// -// Alloc, Function, Global, MakeChan, MakeClosure, MakeInterface, MakeMap, MakeSlice. -// -// Others may be singletons depending on their operands: -// -// FreeVar, Const, Convert, FieldAddr, IndexAddr, Slice, SliceToArrayPointer. -// -// Idempotent. Objects are created as needed, possibly via recursion -// down the SSA value graph, e.g IndexAddr(FieldAddr(Alloc))). -func (a *analysis) objectNode(cgn *cgnode, v ssa.Value) nodeid { - switch v.(type) { - case *ssa.Global, *ssa.Function, *ssa.Const, *ssa.FreeVar: - // Global object. - obj, ok := a.globalobj[v] - if !ok { - switch v := v.(type) { - case *ssa.Global: - obj = a.nextNode() - a.addNodes(mustDeref(v.Type()), "global") - a.endObject(obj, nil, v) - - case *ssa.Function: - obj = a.makeFunctionObject(v, nil) - - case *ssa.Const: - // not addressable - - case *ssa.FreeVar: - // not addressable - } - - if a.log != nil { - fmt.Fprintf(a.log, "\tglobalobj[%s] = n%d\n", v, obj) - } - a.globalobj[v] = obj - } - return obj - } - - // Local object. 
- obj, ok := a.localobj[v] - if !ok { - switch v := v.(type) { - case *ssa.Alloc: - obj = a.nextNode() - a.addNodes(mustDeref(v.Type()), "alloc") - a.endObject(obj, cgn, v) - - case *ssa.MakeSlice: - obj = a.nextNode() - a.addNodes(sliceToArray(v.Type()), "makeslice") - a.endObject(obj, cgn, v) - - case *ssa.MakeChan: - obj = a.nextNode() - a.addNodes(v.Type().Underlying().(*types.Chan).Elem(), "makechan") - a.endObject(obj, cgn, v) - - case *ssa.MakeMap: - obj = a.nextNode() - tmap := v.Type().Underlying().(*types.Map) - a.addNodes(tmap.Key(), "makemap.key") - elem := a.addNodes(tmap.Elem(), "makemap.value") - - // To update the value field, MapUpdate - // generates store-with-offset constraints which - // the presolver can't model, so we must mark - // those nodes indirect. - for id, end := elem, elem+nodeid(a.sizeof(tmap.Elem())); id < end; id++ { - a.mapValues = append(a.mapValues, id) - } - a.endObject(obj, cgn, v) - - case *ssa.MakeInterface: - tConc := v.X.Type() - obj = a.makeTagged(tConc, cgn, v) - - // Copy the value into it, if nontrivial. - if x := a.valueNode(v.X); x != 0 { - a.copy(obj+1, x, a.sizeof(tConc)) - } - - case *ssa.FieldAddr: - if xobj := a.objectNode(cgn, v.X); xobj != 0 { - obj = xobj + nodeid(a.offsetOf(mustDeref(v.X.Type()), v.Field)) - } - - case *ssa.IndexAddr: - if xobj := a.objectNode(cgn, v.X); xobj != 0 { - obj = xobj + 1 - } - - case *ssa.Slice: - obj = a.objectNode(cgn, v.X) - - case *ssa.SliceToArrayPointer: - // Going from a []T to a *[k]T for some k. - // A slice []T is treated as if it were a *T pointer. - obj = a.objectNode(cgn, v.X) - - case *ssa.Convert: - // TODO(adonovan): opt: handle these cases too: - // - unsafe.Pointer->*T conversion acts like Alloc - // - string->[]byte/[]rune conversion acts like MakeSlice - } - - if a.log != nil { - fmt.Fprintf(a.log, "\tlocalobj[%s] = n%d\n", v.Name(), obj) - } - a.localobj[v] = obj - } - return obj -} - -// genLoad generates constraints for result = *(ptr + val). -func (a *analysis) genLoad(cgn *cgnode, result nodeid, ptr ssa.Value, offset, sizeof uint32) { - if obj := a.objectNode(cgn, ptr); obj != 0 { - // Pre-apply loadConstraint.solve(). - a.copy(result, obj+nodeid(offset), sizeof) - } else { - a.load(result, a.valueNode(ptr), offset, sizeof) - } -} - -// genOffsetAddr generates constraints for a 'v=ptr.field' (FieldAddr) -// or 'v=ptr[*]' (IndexAddr) instruction v. -func (a *analysis) genOffsetAddr(cgn *cgnode, v ssa.Value, ptr nodeid, offset uint32) { - dst := a.valueNode(v) - if obj := a.objectNode(cgn, v); obj != 0 { - // Pre-apply offsetAddrConstraint.solve(). - a.addressOf(v.Type(), dst, obj) - } else { - a.offsetAddr(v.Type(), dst, ptr, offset) - } -} - -// genStore generates constraints for *(ptr + offset) = val. -func (a *analysis) genStore(cgn *cgnode, ptr ssa.Value, val nodeid, offset, sizeof uint32) { - if obj := a.objectNode(cgn, ptr); obj != 0 { - // Pre-apply storeConstraint.solve(). - a.copy(obj+nodeid(offset), val, sizeof) - } else { - a.store(a.valueNode(ptr), val, offset, sizeof) - } -} - -// genInstr generates constraints for instruction instr in context cgn. -func (a *analysis) genInstr(cgn *cgnode, instr ssa.Instruction) { - if a.log != nil { - var prefix string - if val, ok := instr.(ssa.Value); ok { - prefix = val.Name() + " = " - } - fmt.Fprintf(a.log, "; %s%s\n", prefix, instr) - } - - switch instr := instr.(type) { - case *ssa.DebugRef: - // no-op. 
- - case *ssa.UnOp: - switch instr.Op { - case token.ARROW: // <-x - // We can ignore instr.CommaOk because the node we're - // altering is always at zero offset relative to instr - tElem := instr.X.Type().Underlying().(*types.Chan).Elem() - a.genLoad(cgn, a.valueNode(instr), instr.X, 0, a.sizeof(tElem)) - - case token.MUL: // *x - a.genLoad(cgn, a.valueNode(instr), instr.X, 0, a.sizeof(instr.Type())) - - default: - // NOT, SUB, XOR: no-op. - } - - case *ssa.BinOp: - // All no-ops. - - case ssa.CallInstruction: // *ssa.Call, *ssa.Go, *ssa.Defer - a.genCall(cgn, instr) - - case *ssa.ChangeType: - a.copy(a.valueNode(instr), a.valueNode(instr.X), 1) - - case *ssa.Convert: - a.genConv(instr, cgn) - - case *ssa.Extract: - a.copy(a.valueNode(instr), - a.valueOffsetNode(instr.Tuple, instr.Index), - a.sizeof(instr.Type())) - - case *ssa.FieldAddr: - a.genOffsetAddr(cgn, instr, a.valueNode(instr.X), - a.offsetOf(mustDeref(instr.X.Type()), instr.Field)) - - case *ssa.IndexAddr: - a.genOffsetAddr(cgn, instr, a.valueNode(instr.X), 1) - - case *ssa.Field: - a.copy(a.valueNode(instr), - a.valueOffsetNode(instr.X, instr.Field), - a.sizeof(instr.Type())) - - case *ssa.Index: - _, isstring := typeparams.CoreType(instr.X.Type()).(*types.Basic) - if !isstring { - a.copy(a.valueNode(instr), 1+a.valueNode(instr.X), a.sizeof(instr.Type())) - } - - case *ssa.Select: - recv := a.valueOffsetNode(instr, 2) // instr : (index, recvOk, recv0, ... recv_n-1) - for _, st := range instr.States { - elemSize := a.sizeof(st.Chan.Type().Underlying().(*types.Chan).Elem()) - switch st.Dir { - case types.RecvOnly: - a.genLoad(cgn, recv, st.Chan, 0, elemSize) - recv += nodeid(elemSize) - - case types.SendOnly: - a.genStore(cgn, st.Chan, a.valueNode(st.Send), 0, elemSize) - } - } - - case *ssa.Return: - results := a.funcResults(cgn.obj) - for _, r := range instr.Results { - sz := a.sizeof(r.Type()) - a.copy(results, a.valueNode(r), sz) - results += nodeid(sz) - } - - case *ssa.Send: - a.genStore(cgn, instr.Chan, a.valueNode(instr.X), 0, a.sizeof(instr.X.Type())) - - case *ssa.Store: - a.genStore(cgn, instr.Addr, a.valueNode(instr.Val), 0, a.sizeof(instr.Val.Type())) - - case *ssa.Alloc, *ssa.MakeSlice, *ssa.MakeChan, *ssa.MakeMap, *ssa.MakeInterface: - v := instr.(ssa.Value) - a.addressOf(v.Type(), a.valueNode(v), a.objectNode(cgn, v)) - - case *ssa.ChangeInterface: - a.copy(a.valueNode(instr), a.valueNode(instr.X), 1) - - case *ssa.TypeAssert: - a.typeAssert(instr.AssertedType, a.valueNode(instr), a.valueNode(instr.X), true) - - case *ssa.Slice: - a.copy(a.valueNode(instr), a.valueNode(instr.X), 1) - - case *ssa.SliceToArrayPointer: - // Going from a []T to a *[k]T (for some k) is a single `dst = src` constraint. - // Both []T and *[k]T are modelled as an *IdArrayT where IdArrayT is the identity - // node for an array of type T, i.e `type IdArrayT struct{elem T}`. - a.copy(a.valueNode(instr), a.valueNode(instr.X), 1) - - case *ssa.If, *ssa.Jump: - // no-op. - - case *ssa.Phi: - sz := a.sizeof(instr.Type()) - for _, e := range instr.Edges { - a.copy(a.valueNode(instr), a.valueNode(e), sz) - } - - case *ssa.MakeClosure: - fn := instr.Fn.(*ssa.Function) - a.copy(a.valueNode(instr), a.valueNode(fn), 1) - // Free variables are treated like global variables. - for i, b := range instr.Bindings { - a.copy(a.valueNode(fn.FreeVars[i]), a.valueNode(b), a.sizeof(b.Type())) - } - - case *ssa.RunDefers: - // The analysis is flow insensitive, so we just "call" - // defers as we encounter them. - - case *ssa.Range: - // Do nothing. 
Next{Iter: *ssa.Range} handles this case. - - case *ssa.Next: - if !instr.IsString { - // Assumes that Next is always directly applied to a Range result - // for a map. - - // Next results in a destination tuple (ok, dk, dv). - // Recall a map is modeled as type *M where M = struct{sk K; sv V}. - // Next copies from a src map struct{sk K; sv V} to a dst tuple (ok, dk, dv) - // - // When keys or value is a blank identifier in a range statement, e.g - // for _, v := range m { ... } - // or - // for _, _ = range m { ... } - // we skip copying from sk or dk as there is no use. dk and dv will have - // Invalid types if they are blank identifiers. This means that the - // size( (ok, dk, dv) ) may differ from 1 + size(struct{sk K; sv V}). - // - // We encode Next using one load of size sz from an offset in src osrc to an - // offset in dst odst. There are 4 cases to consider: - // odst | osrc | sz - // k, v | 1 | 0 | size(sk) + size(sv) - // k, _ | 1 | 0 | size(sk) - // _, v | 1+size(dk) | size(sk) | size(sv) - // _, _ | 1+size(dk) | size(sk) | 0 - - // get the source key and value size. Note the source types - // may be different than the 3-tuple types, but if this is the - // case then the source is assignable to the destination. - theMap := instr.Iter.(*ssa.Range).X - tMap := theMap.Type().Underlying().(*types.Map) - - sksize := a.sizeof(tMap.Key()) - svsize := a.sizeof(tMap.Elem()) - - // get the key size of the destination tuple. - tTuple := instr.Type().(*types.Tuple) - dksize := a.sizeof(tTuple.At(1).Type()) - - // Load from the map's (k,v) into the tuple's (ok, k, v). - osrc := uint32(0) // offset within map object - odst := uint32(1) // offset within tuple (initially just after 'ok bool') - sz := uint32(0) // amount to copy - - // Is key valid? - if tTuple.At(1).Type() != tInvalid { - sz += sksize - } else { - odst += dksize - osrc += sksize - } - - // Is value valid? - if tTuple.At(2).Type() != tInvalid { - sz += svsize - } - - a.genLoad(cgn, a.valueNode(instr)+nodeid(odst), theMap, osrc, sz) - } - - case *ssa.Lookup: - if tMap, ok := instr.X.Type().Underlying().(*types.Map); ok { - // CommaOk can be ignored: field 0 is a no-op. - ksize := a.sizeof(tMap.Key()) - vsize := a.sizeof(tMap.Elem()) - a.genLoad(cgn, a.valueNode(instr), instr.X, ksize, vsize) - } - - case *ssa.MapUpdate: - tmap := instr.Map.Type().Underlying().(*types.Map) - ksize := a.sizeof(tmap.Key()) - vsize := a.sizeof(tmap.Elem()) - a.genStore(cgn, instr.Map, a.valueNode(instr.Key), 0, ksize) - a.genStore(cgn, instr.Map, a.valueNode(instr.Value), ksize, vsize) - - case *ssa.Panic: - a.copy(a.panicNode, a.valueNode(instr.X), 1) - - default: - panic(fmt.Sprintf("unimplemented: %T", instr)) - } -} - -func (a *analysis) makeCGNode(fn *ssa.Function, obj nodeid, callersite *callsite) *cgnode { - cgn := &cgnode{fn: fn, obj: obj, callersite: callersite} - a.cgnodes = append(a.cgnodes, cgn) - return cgn -} - -// genRootCalls generates the synthetic root of the callgraph and the -// initial calls from it to the analysis scope, such as main, a test -// or a library. -func (a *analysis) genRootCalls() *cgnode { - r := a.prog.NewFunction("", new(types.Signature), "root of callgraph") - root := a.makeCGNode(r, 0, nil) - - // TODO(adonovan): make an ssa utility to construct an actual - // root function so we don't need to special-case site-less - // call edges. - - // For each main package, call main.init(), main.main(). 
- for _, mainPkg := range a.config.Mains { - main := mainPkg.Func("main") - if main == nil { - panic(fmt.Sprintf("%s has no main function", mainPkg)) - } - - targets := a.addOneNode(main.Signature, "root.targets", nil) - site := &callsite{targets: targets} - root.sites = append(root.sites, site) - for _, fn := range [2]*ssa.Function{mainPkg.Func("init"), main} { - if a.log != nil { - fmt.Fprintf(a.log, "\troot call to %s:\n", fn) - } - a.copy(targets, a.valueNode(fn), 1) - } - } - - return root -} - -// genFunc generates constraints for function fn. -func (a *analysis) genFunc(cgn *cgnode) { - fn := cgn.fn - - impl := a.findIntrinsic(fn) - - if a.log != nil { - fmt.Fprintf(a.log, "\n\n==== Generating constraints for %s, %s\n", cgn, cgn.contour()) - - // Hack: don't display body if intrinsic. - if impl != nil { - fn2 := *cgn.fn // copy - fn2.Locals = nil - fn2.Blocks = nil - fn2.WriteTo(a.log) - } else { - cgn.fn.WriteTo(a.log) - } - } - - if impl != nil { - impl(a, cgn) - return - } - - if fn.Blocks == nil { - // External function with no intrinsic treatment. - // We'll warn about calls to such functions at the end. - return - } - - if fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0 { - // Body of generic function. - // We'll warn about calls to such functions at the end. - return - } - - if strings.HasPrefix(fn.Synthetic, "instantiation wrapper ") { - // instantiation wrapper of a generic function. - // These may contain type coercions which are not currently supported. - // We'll warn about calls to such functions at the end. - return - } - - if a.log != nil { - fmt.Fprintln(a.log, "; Creating nodes for local values") - } - - a.localval = make(map[ssa.Value]nodeid) - a.localobj = make(map[ssa.Value]nodeid) - - // The value nodes for the params are in the func object block. - params := a.funcParams(cgn.obj) - for _, p := range fn.Params { - a.setValueNode(p, params, cgn) - params += nodeid(a.sizeof(p.Type())) - } - - // Free variables have global cardinality: - // the outer function sets them with MakeClosure; - // the inner function accesses them with FreeVar. - // - // TODO(adonovan): treat free vars context-sensitively. - - // Create value nodes for all value instructions - // since SSA may contain forward references. - var space [10]*ssa.Value - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - switch instr := instr.(type) { - case *ssa.Range: - // do nothing: it has a funky type, - // and *ssa.Next does all the work. - - case ssa.Value: - var comment string - if a.log != nil { - comment = instr.Name() - } - id := a.addNodes(instr.Type(), comment) - a.setValueNode(instr, id, cgn) - } - - // Record all address-taken functions (for presolver). - rands := instr.Operands(space[:0]) - if call, ok := instr.(ssa.CallInstruction); ok && !call.Common().IsInvoke() { - // Skip CallCommon.Value in "call" mode. - // TODO(adonovan): fix: relies on unspecified ordering. Specify it. - rands = rands[1:] - } - for _, rand := range rands { - if atf, ok := (*rand).(*ssa.Function); ok { - a.atFuncs[atf] = true - } - } - } - } - - // Generate constraints for instructions. - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - a.genInstr(cgn, instr) - } - } - - a.localval = nil - a.localobj = nil -} - -// genMethodsOf generates nodes and constraints for all methods of type T. -func (a *analysis) genMethodsOf(T types.Type) { - itf := isInterface(T) - - // TODO(adonovan): can we skip this entirely if itf is true? - // I think so, but the answer may depend on reflection. 
- mset := a.prog.MethodSets.MethodSet(T) - for i, n := 0, mset.Len(); i < n; i++ { - m := a.prog.MethodValue(mset.At(i)) - a.valueNode(m) - - if !itf { - // Methods of concrete types are address-taken functions. - a.atFuncs[m] = true - } - } -} - -// generate generates offline constraints for the entire program. -func (a *analysis) generate() { - start("Constraint generation") - if a.log != nil { - fmt.Fprintln(a.log, "==== Generating constraints") - } - - // Create a dummy node since we use the nodeid 0 for - // non-pointerlike variables. - a.addNodes(tInvalid, "(zero)") - - // Create the global node for panic values. - a.panicNode = a.addNodes(tEface, "panic") - - // Create nodes and constraints for all methods of reflect.rtype. - // (Shared contours are used by dynamic calls to reflect.Type - // methods---typically just String().) - if rtype := a.reflectRtypePtr; rtype != nil { - a.genMethodsOf(rtype) - } - - root := a.genRootCalls() - - if a.config.BuildCallGraph { - a.result.CallGraph = callgraph.New(root.fn) - } - - // Create nodes and constraints for all methods of all types - // that are dynamically accessible via reflection or interfaces. - for _, T := range a.prog.RuntimeTypes() { - a.genMethodsOf(T) - } - - // Generate constraints for functions as they become reachable - // from the roots. (No constraints are generated for functions - // that are dead in this analysis scope.) - for len(a.genq) > 0 { - cgn := a.genq[0] - a.genq = a.genq[1:] - a.genFunc(cgn) - } - - // The runtime magically allocates os.Args; so should we. - if os := a.prog.ImportedPackage("os"); os != nil { - // In effect: os.Args = new([1]string)[:] - T := types.NewSlice(types.Typ[types.String]) - obj := a.addNodes(sliceToArray(T), "") - a.endObject(obj, nil, "") - a.addressOf(T, a.objectNode(nil, os.Var("Args")), obj) - } - - // Discard generation state, to avoid confusion after node renumbering. - a.panicNode = 0 - a.globalval = nil - a.localval = nil - a.localobj = nil - - stop("Constraint generation") -} diff --git a/go/pointer/go.mod b/go/pointer/go.mod deleted file mode 100644 index 168aa253859..00000000000 --- a/go/pointer/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module golang.org/x/tools/go/pointer - -go 1.18 // tagx:compat 1.16 - -require golang.org/x/sys v0.8.0 - -require ( - golang.org/x/mod v0.10.0 // indirect - golang.org/x/tools v0.9.2-0.20230531220058-a260315e300a -) diff --git a/go/pointer/go.sum b/go/pointer/go.sum deleted file mode 100644 index c7c351dba1d..00000000000 --- a/go/pointer/go.sum +++ /dev/null @@ -1,7 +0,0 @@ -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/tools v0.9.2-0.20230531220058-a260315e300a h1:rym71QNKHeCt6OA9UbKSr3jmBnbRNACMDLX5zI18ZOk= -golang.org/x/tools v0.9.2-0.20230531220058-a260315e300a/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= diff --git a/go/pointer/hvn.go b/go/pointer/hvn.go deleted file mode 100644 index ad25cdfa483..00000000000 --- a/go/pointer/hvn.go +++ /dev/null @@ -1,968 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-package pointer
-
-// This file implements Hash-Value Numbering (HVN), a pre-solver
-// constraint optimization described in Hardekopf & Lin, SAS'07 (see
-// doc.go) that analyses the graph topology to determine which sets of
-// variables are "pointer equivalent" (PE), i.e. must have identical
-// points-to sets in the solution.
-//
-// A separate ("offline") graph is constructed. Its nodes are those of
-// the main-graph, plus an additional node *X for each pointer node X.
-// With this graph we can reason about the unknown points-to set of
-// dereferenced pointers. (We do not generalize this to represent
-// unknown fields x->f, perhaps because such fields would be numerous,
-// though it might be worth an experiment.)
-//
-// Nodes whose points-to relations are not entirely captured by the
-// graph are marked as "indirect": the *X nodes, the parameters of
-// address-taken functions (which includes all functions in method
-// sets), or nodes updated by the solver rules for reflection, etc.
-//
-// All addr (y=&x) nodes are initially assigned a pointer-equivalence
-// (PE) label equal to x's nodeid in the main graph. (These are the
-// only PE labels that are less than len(a.nodes).)
-//
-// All offsetAddr (y=&x.f) constraints are initially assigned a PE
-// label; such labels are memoized, keyed by (x, f), so that equivalent
-// nodes y are assigned the same label.
-//
-// Then we process each strongly connected component (SCC) of the graph
-// in topological order, assigning it a PE label based on the set P of
-// PE labels that flow to it from its immediate dependencies.
-//
-// If any node in P is "indirect", the entire SCC is assigned a fresh PE
-// label. Otherwise:
-//
-// |P|=0 if P is empty, all nodes in the SCC are non-pointers (e.g.
-// uninitialized variables, or formal params of dead functions)
-// and the SCC is assigned the PE label of zero.
-//
-// |P|=1 if P is a singleton, the SCC is assigned the same label as the
-// sole element of P.
-//
-// |P|>1 if P contains multiple labels, a unique label representing P is
-// invented and recorded in a hash table, so that other
-// equivalent SCCs may also be assigned this label, akin to
-// conventional hash-value numbering in a compiler.
-//
-// Finally, a renumbering is computed such that each node is replaced by
-// the lowest-numbered node with the same PE label. All constraints are
-// renumbered, and any resulting duplicates are eliminated.
-//
-// The only nodes that are not renumbered are the objects x in addr
-// (y=&x) constraints, since the ids of these nodes (and fields derived
-// from them via offsetAddr rules) are the elements of all points-to
-// sets, so they must remain as they are if we want the same solution.
-//
-// The solverStates (node.solve) for nodes in the same equivalence class
-// are linked together so that all nodes in the class have the same
-// solution. This avoids the need to renumber nodeids buried in
-// Queries, cgnodes, etc (like (*analysis).renumber() does) since only
-// the solution is needed.
-//
-// The result of HVN is that the number of distinct nodes and
-// constraints is reduced, but the solution is identical (almost---see
-// CROSS-CHECK below). In particular, both linear and cyclic chains of
-// copies are each replaced by a single node.
-//
-// Nodes and constraints created "online" (e.g. while solving reflection
-// constraints) are not subject to this optimization.
-//
-// PERFORMANCE
-//
-// In two benchmarks (guru and godoc), HVN eliminates about two thirds
-// of nodes, the majority accounted for by non-pointers: nodes of
-// non-pointer type, pointers that remain nil, formal parameters of dead
-// functions, nodes of untracked types, etc. It also reduces the number
-// of constraints, also by about two thirds, and the solving time by
-// 30--42%, although we must pay about 15% for the running time of HVN
-// itself. The benefit is greater for larger applications.
-//
-// There are many possible optimizations to improve the performance:
-// * Use fewer than 1:1 onodes to main graph nodes: many of the onodes
-// we create are not needed.
-// * HU (HVN with Union---see paper): coalesce "union" peLabels when
-// their expanded-out sets are equal.
-// * HR (HVN with deReference---see paper): this will require that we
-// apply HVN until fixed point, which may need more bookkeeping of the
-// correspondence of main nodes to onodes.
-// * Location Equivalence (see paper): have points-to sets contain not
-// locations but location-equivalence class labels, each representing
-// a set of locations.
-// * HVN with field-sensitive ref: model each of the fields of a
-// pointer-to-struct.
-//
-// CROSS-CHECK
-//
-// To verify the soundness of the optimization, when the
-// debugHVNCrossCheck option is enabled, we run the solver twice, once
-// before and once after running HVN, dumping the solution to disk, and
-// then we compare the results. If they are not identical, the analysis
-// panics.
-//
-// The solution dumped to disk includes only the N*N submatrix of the
-// complete solution where N is the number of nodes after generation.
-// In other words, we ignore pointer variables and objects created by
-// the solver itself, since their numbering depends on the solver order,
-// which is affected by the optimization. In any case, that's the only
-// part the client cares about.
-//
-// The cross-check is too strict and may fail spuriously. Although the
-// H&L paper describing HVN states that the solutions obtained should be
-// identical, this is not the case in practice because HVN can collapse
-// cycles involving *p even when pts(p)={}. Consider this example
-// distilled from testdata/hello.go:
-//
-// var x T
-// func f(p **T) {
-// t0 = *p
-// ...
-// t1 = φ(t0, &x)
-// *p = t1
-// }
-//
-// If f is dead code, we get:
-// unoptimized: pts(p)={} pts(t0)={} pts(t1)={&x}
-// optimized: pts(p)={} pts(t0)=pts(t1)=pts(*p)={&x}
-//
-// It's hard to argue that this is a bug: the result is sound and the
-// loss of precision is inconsequential---f is dead code, after all.
-// But unfortunately it limits the usefulness of the cross-check since
-// failures must be carefully analyzed. Ben Hardekopf suggests (in
-// personal correspondence) some approaches to mitigating it:
-//
-// If there is a node with an HVN points-to set that is a superset
-// of the NORM points-to set, then either it's a bug or it's a
-// result of this issue. If it's a result of this issue, then in
-// the offline constraint graph there should be a REF node inside
-// some cycle that reaches this node, and in the NORM solution the
-// pointer being dereferenced by that REF node should be the empty
-// set. If that isn't true then this is a bug. If it is true, then
-// you can further check that in the NORM solution the "extra"
-// points-to info in the HVN solution does in fact come from that
-// purported cycle (if it doesn't, then this is still a bug). If
-// you're doing the further check then you'll need to do it for
-// each "extra" points-to element in the HVN points-to set.
-//
-// There are probably ways to optimize these checks by taking
-// advantage of graph properties. For example, extraneous points-to
-// info will flow through the graph and end up in many
-// nodes. Rather than checking every node with extra info, you
-// could probably work out the "origin point" of the extra info and
-// just check there. Note that the check in the first bullet is
-// looking for soundness bugs, while the check in the second bullet
-// is looking for precision bugs; depending on your needs, you may
-// care more about one than the other.
-//
-// which we should evaluate. The cross-check is nonetheless invaluable
-// for all but one of the programs in the pointer_test suite.
-
-import (
- "fmt"
- "go/types"
- "io"
- "reflect"
-
- "golang.org/x/tools/container/intsets"
-)
-
-// A peLabel is a pointer-equivalence label: two nodes with the same
-// peLabel have identical points-to solutions.
-//
-// The numbers are allocated consecutively like so:
-//
-// 0 not a pointer
-// 1..N-1 addrConstraints (equals the constraint's .src field, hence sparse)
-// ... offsetAddr constraints
-// ... SCCs (with indirect nodes or multiple inputs)
-//
-// Each PE label denotes a set of pointers containing a single addr, a
-// single offsetAddr, or some set of other PE labels.
-type peLabel int
-
-type hvn struct {
- a *analysis
- N int // len(a.nodes) immediately after constraint generation
- log io.Writer // (optional) log of HVN lemmas
- onodes []*onode // nodes of the offline graph
- label peLabel // the next available PE label
- hvnLabel map[string]peLabel // hash-value numbering (PE label) for each set of onodeids
- stack []onodeid // DFS stack
- index int32 // next onode.index, from Tarjan's SCC algorithm
-
- // For each distinct offsetAddrConstraint (src, offset) pair,
- // offsetAddrLabels records a unique PE label >= N.
- offsetAddrLabels map[offsetAddr]peLabel
-}
-
-// The index of a node in the offline graph.
-// (Currently the first N align with the main nodes,
-// but this may change with HRU.)
-type onodeid uint32
-
-// An onode is a node in the offline constraint graph.
-// (Where ambiguous, members of analysis.nodes are referred to as
-// "main graph" nodes.)
-//
-// Edges in the offline constraint graph (edges and implicit) point to
-// the source, i.e. against the flow of values: they are dependencies.
-// Implicit edges are used for SCC computation, but not for gathering
-// incoming labels.
-type onode struct {
- rep onodeid // index of representative of SCC in offline constraint graph
-
- edges intsets.Sparse // constraint edges X-->Y (this onode is X)
- implicit intsets.Sparse // implicit edges *X-->*Y (this onode is X)
- peLabels intsets.Sparse // set of peLabels that are pointer-equivalent to this one
- indirect bool // node has points-to relations not represented in graph
-
- // Tarjan's SCC algorithm
- index, lowlink int32 // Tarjan numbering
- scc int32 // -ve => on stack; 0 => unvisited; +ve => node is root of a found SCC
-}
-
-type offsetAddr struct {
- ptr nodeid
- offset uint32
-}
-
-// nextLabel issues the next unused pointer-equivalence label.
-func (h *hvn) nextLabel() peLabel {
- h.label++
- return h.label
-}
-
-// ref(X) returns the index of the onode for *X.
-func (h *hvn) ref(id onodeid) onodeid { - return id + onodeid(len(h.a.nodes)) -} - -// hvn computes pointer-equivalence labels (peLabels) using the Hash-based -// Value Numbering (HVN) algorithm described in Hardekopf & Lin, SAS'07. -func (a *analysis) hvn() { - start("HVN") - - if a.log != nil { - fmt.Fprintf(a.log, "\n\n==== Pointer equivalence optimization\n\n") - } - - h := hvn{ - a: a, - N: len(a.nodes), - log: a.log, - hvnLabel: make(map[string]peLabel), - offsetAddrLabels: make(map[offsetAddr]peLabel), - } - - if h.log != nil { - fmt.Fprintf(h.log, "\nCreating offline graph nodes...\n") - } - - // Create offline nodes. The first N nodes correspond to main - // graph nodes; the next N are their corresponding ref() nodes. - h.onodes = make([]*onode, 2*h.N) - for id := range a.nodes { - id := onodeid(id) - h.onodes[id] = &onode{} - h.onodes[h.ref(id)] = &onode{indirect: true} - } - - // Each node initially represents just itself. - for id, o := range h.onodes { - o.rep = onodeid(id) - } - - h.markIndirectNodes() - - // Reserve the first N PE labels for addrConstraints. - h.label = peLabel(h.N) - - // Add offline constraint edges. - if h.log != nil { - fmt.Fprintf(h.log, "\nAdding offline graph edges...\n") - } - for _, c := range a.constraints { - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "; %s\n", c) - } - c.presolve(&h) - } - - // Find and collapse SCCs. - if h.log != nil { - fmt.Fprintf(h.log, "\nFinding SCCs...\n") - } - h.index = 1 - for id, o := range h.onodes { - if id > 0 && o.index == 0 { - // Start depth-first search at each unvisited node. - h.visit(onodeid(id)) - } - } - - // Dump the solution - // (NB: somewhat redundant with logging from simplify().) - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\nPointer equivalences:\n") - for id, o := range h.onodes { - if id == 0 { - continue - } - if id == int(h.N) { - fmt.Fprintf(h.log, "---\n") - } - fmt.Fprintf(h.log, "o%d\t", id) - if o.rep != onodeid(id) { - fmt.Fprintf(h.log, "rep=o%d", o.rep) - } else { - fmt.Fprintf(h.log, "p%d", o.peLabels.Min()) - if o.indirect { - fmt.Fprint(h.log, " indirect") - } - } - fmt.Fprintln(h.log) - } - } - - // Simplify the main constraint graph - h.simplify() - - a.showCounts() - - stop("HVN") -} - -// ---- constraint-specific rules ---- - -// dst := &src -func (c *addrConstraint) presolve(h *hvn) { - // Each object (src) is an initial PE label. - label := peLabel(c.src) // label < N - if debugHVNVerbose && h.log != nil { - // duplicate log messages are possible - fmt.Fprintf(h.log, "\tcreate p%d: {&n%d}\n", label, c.src) - } - odst := onodeid(c.dst) - osrc := onodeid(c.src) - - // Assign dst this label. - h.onodes[odst].peLabels.Insert(int(label)) - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\to%d has p%d\n", odst, label) - } - - h.addImplicitEdge(h.ref(odst), osrc) // *dst ~~> src. -} - -// dst = src -func (c *copyConstraint) presolve(h *hvn) { - odst := onodeid(c.dst) - osrc := onodeid(c.src) - h.addEdge(odst, osrc) // dst --> src - h.addImplicitEdge(h.ref(odst), h.ref(osrc)) // *dst ~~> *src -} - -// dst = *src + offset -func (c *loadConstraint) presolve(h *hvn) { - odst := onodeid(c.dst) - osrc := onodeid(c.src) - if c.offset == 0 { - h.addEdge(odst, h.ref(osrc)) // dst --> *src - } else { - // We don't interpret load-with-offset, e.g. results - // of map value lookup, R-block of dynamic call, slice - // copy/append, reflection. 
- h.markIndirect(odst, "load with offset") - } -} - -// *dst + offset = src -func (c *storeConstraint) presolve(h *hvn) { - odst := onodeid(c.dst) - osrc := onodeid(c.src) - if c.offset == 0 { - h.onodes[h.ref(odst)].edges.Insert(int(osrc)) // *dst --> src - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\to%d --> o%d\n", h.ref(odst), osrc) - } - } - // We don't interpret store-with-offset. - // See discussion of soundness at markIndirectNodes. -} - -// dst = &src.offset -func (c *offsetAddrConstraint) presolve(h *hvn) { - // Give each distinct (addr, offset) pair a fresh PE label. - // The cache performs CSE, effectively. - key := offsetAddr{c.src, c.offset} - label, ok := h.offsetAddrLabels[key] - if !ok { - label = h.nextLabel() - h.offsetAddrLabels[key] = label - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\tcreate p%d: {&n%d.#%d}\n", - label, c.src, c.offset) - } - } - - // Assign dst this label. - h.onodes[c.dst].peLabels.Insert(int(label)) - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\to%d has p%d\n", c.dst, label) - } -} - -// dst = src.(typ) where typ is an interface -func (c *typeFilterConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.dst), "typeFilter result") -} - -// dst = src.(typ) where typ is concrete -func (c *untagConstraint) presolve(h *hvn) { - odst := onodeid(c.dst) - for end := odst + onodeid(h.a.sizeof(c.typ)); odst < end; odst++ { - h.markIndirect(odst, "untag result") - } -} - -// dst = src.method(c.params...) -func (c *invokeConstraint) presolve(h *hvn) { - // All methods are address-taken functions, so - // their formal P-blocks were already marked indirect. - - // Mark the caller's targets node as indirect. - sig := c.method.Type().(*types.Signature) - id := c.params - h.markIndirect(onodeid(c.params), "invoke targets node") - id++ - - id += nodeid(h.a.sizeof(sig.Params())) - - // Mark the caller's R-block as indirect. - end := id + nodeid(h.a.sizeof(sig.Results())) - for id < end { - h.markIndirect(onodeid(id), "invoke R-block") - id++ - } -} - -// markIndirectNodes marks as indirect nodes whose points-to relations -// are not entirely captured by the offline graph, including: -// -// (a) All address-taken nodes (including the following nodes within -// the same object). This is described in the paper. -// -// The most subtle cause of indirect nodes is the generation of -// store-with-offset constraints since the offline graph doesn't -// represent them. A global audit of constraint generation reveals the -// following uses of store-with-offset: -// -// (b) genDynamicCall, for P-blocks of dynamically called functions, -// to which dynamic copy edges will be added to them during -// solving: from storeConstraint for standalone functions, -// and from invokeConstraint for methods. -// All such P-blocks must be marked indirect. -// (c) MakeUpdate, to update the value part of a map object. -// All MakeMap objects's value parts must be marked indirect. -// (d) copyElems, to update the destination array. -// All array elements must be marked indirect. -// -// Not all indirect marking happens here. ref() nodes are marked -// indirect at construction, and each constraint's presolve() method may -// mark additional nodes. -func (h *hvn) markIndirectNodes() { - // (a) all address-taken nodes, plus all nodes following them - // within the same object, since these may be indirectly - // stored or address-taken. 
- for _, c := range h.a.constraints { - if c, ok := c.(*addrConstraint); ok { - start := h.a.enclosingObj(c.src) - end := start + nodeid(h.a.nodes[start].obj.size) - for id := c.src; id < end; id++ { - h.markIndirect(onodeid(id), "A-T object") - } - } - } - - // (b) P-blocks of all address-taken functions. - for id := 0; id < h.N; id++ { - obj := h.a.nodes[id].obj - - // TODO(adonovan): opt: if obj.cgn.fn is a method and - // obj.cgn is not its shared contour, this is an - // "inlined" static method call. We needn't consider it - // address-taken since no invokeConstraint will affect it. - - if obj != nil && obj.flags&otFunction != 0 && h.a.atFuncs[obj.cgn.fn] { - // address-taken function - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "n%d is address-taken: %s\n", id, obj.cgn.fn) - } - h.markIndirect(onodeid(id), "A-T func identity") - id++ - sig := obj.cgn.fn.Signature - psize := h.a.sizeof(sig.Params()) - if sig.Recv() != nil { - psize += h.a.sizeof(sig.Recv().Type()) - } - for end := id + int(psize); id < end; id++ { - h.markIndirect(onodeid(id), "A-T func P-block") - } - id-- - continue - } - } - - // (c) all map objects' value fields. - for _, id := range h.a.mapValues { - h.markIndirect(onodeid(id), "makemap.value") - } - - // (d) all array element objects. - // TODO(adonovan): opt: can we do better? - for id := 0; id < h.N; id++ { - // Identity node for an object of array type? - if tArray, ok := h.a.nodes[id].typ.(*types.Array); ok { - // Mark the array element nodes indirect. - // (Skip past the identity field.) - for range h.a.flatten(tArray.Elem()) { - id++ - h.markIndirect(onodeid(id), "array elem") - } - } - } -} - -func (h *hvn) markIndirect(oid onodeid, comment string) { - h.onodes[oid].indirect = true - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\to%d is indirect: %s\n", oid, comment) - } -} - -// Adds an edge dst-->src. -// Note the unusual convention: edges are dependency (contraflow) edges. -func (h *hvn) addEdge(odst, osrc onodeid) { - h.onodes[odst].edges.Insert(int(osrc)) - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\to%d --> o%d\n", odst, osrc) - } -} - -func (h *hvn) addImplicitEdge(odst, osrc onodeid) { - h.onodes[odst].implicit.Insert(int(osrc)) - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\to%d ~~> o%d\n", odst, osrc) - } -} - -// visit implements the depth-first search of Tarjan's SCC algorithm. -// Precondition: x is canonical. -func (h *hvn) visit(x onodeid) { - h.checkCanonical(x) - xo := h.onodes[x] - xo.index = h.index - xo.lowlink = h.index - h.index++ - - h.stack = append(h.stack, x) // push - assert(xo.scc == 0, "node revisited") - xo.scc = -1 - - var deps []int - deps = xo.edges.AppendTo(deps) - deps = xo.implicit.AppendTo(deps) - - for _, y := range deps { - // Loop invariant: x is canonical. - - y := h.find(onodeid(y)) - - if x == y { - continue // nodes already coalesced - } - - xo := h.onodes[x] - yo := h.onodes[y] - - switch { - case yo.scc > 0: - // y is already a collapsed SCC - - case yo.scc < 0: - // y is on the stack, and thus in the current SCC. - if yo.index < xo.lowlink { - xo.lowlink = yo.index - } - - default: - // y is unvisited; visit it now. - h.visit(y) - // Note: x and y are now non-canonical. - - x = h.find(onodeid(x)) - - if yo.lowlink < xo.lowlink { - xo.lowlink = yo.lowlink - } - } - } - h.checkCanonical(x) - - // Is x the root of an SCC? - if xo.lowlink == xo.index { - // Coalesce all nodes in the SCC. 
- if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "scc o%d\n", x) - } - for { - // Pop y from stack. - i := len(h.stack) - 1 - y := h.stack[i] - h.stack = h.stack[:i] - - h.checkCanonical(x) - xo := h.onodes[x] - h.checkCanonical(y) - yo := h.onodes[y] - - if xo == yo { - // SCC is complete. - xo.scc = 1 - h.labelSCC(x) - break - } - h.coalesce(x, y) - } - } -} - -// Precondition: x is canonical. -func (h *hvn) labelSCC(x onodeid) { - h.checkCanonical(x) - xo := h.onodes[x] - xpe := &xo.peLabels - - // All indirect nodes get new labels. - if xo.indirect { - label := h.nextLabel() - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\tcreate p%d: indirect SCC\n", label) - fmt.Fprintf(h.log, "\to%d has p%d\n", x, label) - } - - // Remove pre-labeling, in case a direct pre-labeled node was - // merged with an indirect one. - xpe.Clear() - xpe.Insert(int(label)) - - return - } - - // Invariant: all peLabels sets are non-empty. - // Those that are logically empty contain zero as their sole element. - // No other sets contains zero. - - // Find all labels coming in to the coalesced SCC node. - for _, y := range xo.edges.AppendTo(nil) { - y := h.find(onodeid(y)) - if y == x { - continue // already coalesced - } - ype := &h.onodes[y].peLabels - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\tedge from o%d = %s\n", y, ype) - } - - if ype.IsEmpty() { - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\tnode has no PE label\n") - } - } - assert(!ype.IsEmpty(), "incoming node has no PE label") - - if ype.Has(0) { - // {0} represents a non-pointer. - assert(ype.Len() == 1, "PE set contains {0, ...}") - } else { - xpe.UnionWith(ype) - } - } - - switch xpe.Len() { - case 0: - // SCC has no incoming non-zero PE labels: it is a non-pointer. - xpe.Insert(0) - - case 1: - // already a singleton - - default: - // SCC has multiple incoming non-zero PE labels. - // Find the canonical label representing this set. - // We use String() as a fingerprint consistent with Equals(). - key := xpe.String() - label, ok := h.hvnLabel[key] - if !ok { - label = h.nextLabel() - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\tcreate p%d: union %s\n", label, xpe.String()) - } - h.hvnLabel[key] = label - } - xpe.Clear() - xpe.Insert(int(label)) - } - - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\to%d has p%d\n", x, xpe.Min()) - } -} - -// coalesce combines two nodes in the offline constraint graph. -// Precondition: x and y are canonical. -func (h *hvn) coalesce(x, y onodeid) { - xo := h.onodes[x] - yo := h.onodes[y] - - // x becomes y's canonical representative. - yo.rep = x - - if debugHVNVerbose && h.log != nil { - fmt.Fprintf(h.log, "\tcoalesce o%d into o%d\n", y, x) - } - - // x accumulates y's edges. - xo.edges.UnionWith(&yo.edges) - yo.edges.Clear() - - // x accumulates y's implicit edges. - xo.implicit.UnionWith(&yo.implicit) - yo.implicit.Clear() - - // x accumulates y's pointer-equivalence labels. - xo.peLabels.UnionWith(&yo.peLabels) - yo.peLabels.Clear() - - // x accumulates y's indirect flag. - if yo.indirect { - xo.indirect = true - } -} - -// simplify computes a degenerate renumbering of nodeids from the PE -// labels assigned by the hvn, and uses it to simplify the main -// constraint graph, eliminating non-pointer nodes and duplicate -// constraints. -func (h *hvn) simplify() { - // canon maps each peLabel to its canonical main node. 
- canon := make([]nodeid, h.label) - for i := range canon { - canon[i] = nodeid(h.N) // indicates "unset" - } - - // mapping maps each main node index to the index of the canonical node. - mapping := make([]nodeid, len(h.a.nodes)) - - for id := range h.a.nodes { - id := nodeid(id) - if id == 0 { - canon[0] = 0 - mapping[0] = 0 - continue - } - oid := h.find(onodeid(id)) - peLabels := &h.onodes[oid].peLabels - assert(peLabels.Len() == 1, "PE class is not a singleton") - label := peLabel(peLabels.Min()) - - canonID := canon[label] - if canonID == nodeid(h.N) { - // id becomes the representative of the PE label. - canonID = id - canon[label] = canonID - - if h.a.log != nil { - fmt.Fprintf(h.a.log, "\tpts(n%d) is canonical : \t(%s)\n", - id, h.a.nodes[id].typ) - } - - } else { - // Link the solver states for the two nodes. - assert(h.a.nodes[canonID].solve != nil, "missing solver state") - h.a.nodes[id].solve = h.a.nodes[canonID].solve - - if h.a.log != nil { - // TODO(adonovan): debug: reorganize the log so it prints - // one line: - // pe y = x1, ..., xn - // for each canonical y. Requires allocation. - fmt.Fprintf(h.a.log, "\tpts(n%d) = pts(n%d) : %s\n", - id, canonID, h.a.nodes[id].typ) - } - } - - mapping[id] = canonID - } - - // Renumber the constraints, eliminate duplicates, and eliminate - // any containing non-pointers (n0). - addrs := make(map[addrConstraint]bool) - copys := make(map[copyConstraint]bool) - loads := make(map[loadConstraint]bool) - stores := make(map[storeConstraint]bool) - offsetAddrs := make(map[offsetAddrConstraint]bool) - untags := make(map[untagConstraint]bool) - typeFilters := make(map[typeFilterConstraint]bool) - invokes := make(map[invokeConstraint]bool) - - nbefore := len(h.a.constraints) - cc := h.a.constraints[:0] // in-situ compaction - for _, c := range h.a.constraints { - // Renumber. - switch c := c.(type) { - case *addrConstraint: - // Don't renumber c.src since it is the label of - // an addressable object and will appear in PT sets. - c.dst = mapping[c.dst] - default: - c.renumber(mapping) - } - - if c.ptr() == 0 { - continue // skip: constraint attached to non-pointer - } - - var dup bool - switch c := c.(type) { - case *addrConstraint: - _, dup = addrs[*c] - addrs[*c] = true - - case *copyConstraint: - if c.src == c.dst { - continue // skip degenerate copies - } - if c.src == 0 { - continue // skip copy from non-pointer - } - _, dup = copys[*c] - copys[*c] = true - - case *loadConstraint: - if c.src == 0 { - continue // skip load from non-pointer - } - _, dup = loads[*c] - loads[*c] = true - - case *storeConstraint: - if c.src == 0 { - continue // skip store from non-pointer - } - _, dup = stores[*c] - stores[*c] = true - - case *offsetAddrConstraint: - if c.src == 0 { - continue // skip offset from non-pointer - } - _, dup = offsetAddrs[*c] - offsetAddrs[*c] = true - - case *untagConstraint: - if c.src == 0 { - continue // skip untag of non-pointer - } - _, dup = untags[*c] - untags[*c] = true - - case *typeFilterConstraint: - if c.src == 0 { - continue // skip filter of non-pointer - } - _, dup = typeFilters[*c] - typeFilters[*c] = true - - case *invokeConstraint: - if c.params == 0 { - panic("non-pointer invoke.params") - } - if c.iface == 0 { - continue // skip invoke on non-pointer - } - _, dup = invokes[*c] - invokes[*c] = true - - default: - // We don't bother de-duping advanced constraints - // (e.g. reflection) since they are uncommon. - - // Eliminate constraints containing non-pointer nodeids. 
- // - // We use reflection to find the fields to avoid - // adding yet another method to constraint. - // - // TODO(adonovan): experiment with a constraint - // method that returns a slice of pointers to - // nodeids fields to enable uniform iteration; - // the renumber() method could be removed and - // implemented using the new one. - // - // TODO(adonovan): opt: this is unsound since - // some constraints still have an effect if one - // of the operands is zero: rVCall, rVMapIndex, - // rvSetMapIndex. Handle them specially. - rtNodeid := reflect.TypeOf(nodeid(0)) - x := reflect.ValueOf(c).Elem() - for i, nf := 0, x.NumField(); i < nf; i++ { - f := x.Field(i) - if f.Type() == rtNodeid { - if f.Uint() == 0 { - dup = true // skip it - break - } - } - } - } - if dup { - continue // skip duplicates - } - - cc = append(cc, c) - } - h.a.constraints = cc - - if h.log != nil { - fmt.Fprintf(h.log, "#constraints: was %d, now %d\n", nbefore, len(h.a.constraints)) - } -} - -// find returns the canonical onodeid for x. -// (The onodes form a disjoint set forest.) -func (h *hvn) find(x onodeid) onodeid { - // TODO(adonovan): opt: this is a CPU hotspot. Try "union by rank". - xo := h.onodes[x] - rep := xo.rep - if rep != x { - rep = h.find(rep) // simple path compression - xo.rep = rep - } - return rep -} - -func (h *hvn) checkCanonical(x onodeid) { - if debugHVN { - assert(x == h.find(x), "not canonical") - } -} - -func assert(p bool, msg string) { - if debugHVN && !p { - panic("assertion failed: " + msg) - } -} diff --git a/go/pointer/intrinsics.go b/go/pointer/intrinsics.go deleted file mode 100644 index 43bb8e8fcf4..00000000000 --- a/go/pointer/intrinsics.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pointer - -// This package defines the treatment of intrinsics, i.e. library -// functions requiring special analytical treatment. -// -// Most of these are C or assembly functions, but even some Go -// functions require may special treatment if the analysis completely -// replaces the implementation of an API such as reflection. - -// TODO(adonovan): support a means of writing analytic summaries in -// the target code, so that users can summarise the effects of their -// own C functions using a snippet of Go. - -import ( - "fmt" - "go/types" - - "golang.org/x/tools/go/ssa" -) - -// Instances of 'intrinsic' generate analysis constraints for calls to -// intrinsic functions. -// Implementations may exploit information from the calling site -// via cgn.callersite; for shared contours this is nil. -type intrinsic func(a *analysis, cgn *cgnode) - -// Initialized in explicit init() to defeat (spurious) initialization -// cycle error. -var intrinsicsByName = make(map[string]intrinsic) - -func init() { - // Key strings are from Function.String(). - // That little dot ۰ is an Arabic zero numeral (U+06F0), - // categories [Nd]. - for name, fn := range map[string]intrinsic{ - // Other packages. 
- "bytes.Equal": ext۰NoEffect, - "bytes.IndexByte": ext۰NoEffect, - "crypto/aes.decryptBlockAsm": ext۰NoEffect, - "crypto/aes.encryptBlockAsm": ext۰NoEffect, - "crypto/aes.expandKeyAsm": ext۰NoEffect, - "crypto/aes.hasAsm": ext۰NoEffect, - "crypto/md5.block": ext۰NoEffect, - "crypto/rc4.xorKeyStream": ext۰NoEffect, - "crypto/sha1.block": ext۰NoEffect, - "crypto/sha256.block": ext۰NoEffect, - "hash/crc32.castagnoliSSE42": ext۰NoEffect, - "hash/crc32.haveSSE42": ext۰NoEffect, - "math.Abs": ext۰NoEffect, - "math.Acos": ext۰NoEffect, - "math.Asin": ext۰NoEffect, - "math.Atan": ext۰NoEffect, - "math.Atan2": ext۰NoEffect, - "math.Ceil": ext۰NoEffect, - "math.Cos": ext۰NoEffect, - "math.Dim": ext۰NoEffect, - "math.Exp": ext۰NoEffect, - "math.Exp2": ext۰NoEffect, - "math.Expm1": ext۰NoEffect, - "math.Float32bits": ext۰NoEffect, - "math.Float32frombits": ext۰NoEffect, - "math.Float64bits": ext۰NoEffect, - "math.Float64frombits": ext۰NoEffect, - "math.Floor": ext۰NoEffect, - "math.Frexp": ext۰NoEffect, - "math.Hypot": ext۰NoEffect, - "math.Ldexp": ext۰NoEffect, - "math.Log": ext۰NoEffect, - "math.Log10": ext۰NoEffect, - "math.Log1p": ext۰NoEffect, - "math.Log2": ext۰NoEffect, - "math.Max": ext۰NoEffect, - "math.Min": ext۰NoEffect, - "math.Mod": ext۰NoEffect, - "math.Modf": ext۰NoEffect, - "math.Remainder": ext۰NoEffect, - "math.Sin": ext۰NoEffect, - "math.Sincos": ext۰NoEffect, - "math.Sqrt": ext۰NoEffect, - "math.Tan": ext۰NoEffect, - "math.Trunc": ext۰NoEffect, - "math/big.addMulVVW": ext۰NoEffect, - "math/big.addVV": ext۰NoEffect, - "math/big.addVW": ext۰NoEffect, - "math/big.bitLen": ext۰NoEffect, - "math/big.divWVW": ext۰NoEffect, - "math/big.divWW": ext۰NoEffect, - "math/big.mulAddVWW": ext۰NoEffect, - "math/big.mulWW": ext۰NoEffect, - "math/big.shlVU": ext۰NoEffect, - "math/big.shrVU": ext۰NoEffect, - "math/big.subVV": ext۰NoEffect, - "math/big.subVW": ext۰NoEffect, - "net.runtime_Semacquire": ext۰NoEffect, - "net.runtime_Semrelease": ext۰NoEffect, - "net.runtime_pollClose": ext۰NoEffect, - "net.runtime_pollOpen": ext۰NoEffect, - "net.runtime_pollReset": ext۰NoEffect, - "net.runtime_pollServerInit": ext۰NoEffect, - "net.runtime_pollSetDeadline": ext۰NoEffect, - "net.runtime_pollUnblock": ext۰NoEffect, - "net.runtime_pollWait": ext۰NoEffect, - "net.runtime_pollWaitCanceled": ext۰NoEffect, - "os.epipecheck": ext۰NoEffect, - // All other runtime functions are treated as NoEffect. 
- "runtime.SetFinalizer": ext۰runtime۰SetFinalizer, - "strings.IndexByte": ext۰NoEffect, - "sync.runtime_Semacquire": ext۰NoEffect, - "sync.runtime_Semrelease": ext۰NoEffect, - "sync.runtime_Syncsemacquire": ext۰NoEffect, - "sync.runtime_Syncsemcheck": ext۰NoEffect, - "sync.runtime_Syncsemrelease": ext۰NoEffect, - "sync.runtime_procPin": ext۰NoEffect, - "sync.runtime_procUnpin": ext۰NoEffect, - "sync.runtime_registerPool": ext۰NoEffect, - "sync/atomic.AddInt32": ext۰NoEffect, - "sync/atomic.AddInt64": ext۰NoEffect, - "sync/atomic.AddUint32": ext۰NoEffect, - "sync/atomic.AddUint64": ext۰NoEffect, - "sync/atomic.AddUintptr": ext۰NoEffect, - "sync/atomic.CompareAndSwapInt32": ext۰NoEffect, - "sync/atomic.CompareAndSwapUint32": ext۰NoEffect, - "sync/atomic.CompareAndSwapUint64": ext۰NoEffect, - "sync/atomic.CompareAndSwapUintptr": ext۰NoEffect, - "sync/atomic.LoadInt32": ext۰NoEffect, - "sync/atomic.LoadInt64": ext۰NoEffect, - "sync/atomic.LoadPointer": ext۰NoEffect, // ignore unsafe.Pointers - "sync/atomic.LoadUint32": ext۰NoEffect, - "sync/atomic.LoadUint64": ext۰NoEffect, - "sync/atomic.LoadUintptr": ext۰NoEffect, - "sync/atomic.StoreInt32": ext۰NoEffect, - "sync/atomic.StorePointer": ext۰NoEffect, // ignore unsafe.Pointers - "sync/atomic.StoreUint32": ext۰NoEffect, - "sync/atomic.StoreUintptr": ext۰NoEffect, - "syscall.Close": ext۰NoEffect, - "syscall.Exit": ext۰NoEffect, - "syscall.Getpid": ext۰NoEffect, - "syscall.Getwd": ext۰NoEffect, - "syscall.Kill": ext۰NoEffect, - "syscall.RawSyscall": ext۰NoEffect, - "syscall.RawSyscall6": ext۰NoEffect, - "syscall.Syscall": ext۰NoEffect, - "syscall.Syscall6": ext۰NoEffect, - "syscall.runtime_AfterFork": ext۰NoEffect, - "syscall.runtime_BeforeFork": ext۰NoEffect, - "syscall.setenv_c": ext۰NoEffect, - "time.Sleep": ext۰NoEffect, - "time.now": ext۰NoEffect, - "time.startTimer": ext۰time۰startTimer, - "time.stopTimer": ext۰NoEffect, - } { - intrinsicsByName[name] = fn - } -} - -// findIntrinsic returns the constraint generation function for an -// intrinsic function fn, or nil if the function should be handled normally. -func (a *analysis) findIntrinsic(fn *ssa.Function) intrinsic { - // Consult the *Function-keyed cache. - // A cached nil indicates a normal non-intrinsic function. - impl, ok := a.intrinsics[fn] - if !ok { - impl = intrinsicsByName[fn.String()] // may be nil - - if a.isReflect(fn) { - if !a.config.Reflection { - impl = ext۰NoEffect // reflection disabled - } else if impl == nil { - // Ensure all "reflect" code is treated intrinsically. - impl = ext۰NotYetImplemented - } - } else if impl == nil && fn.Pkg != nil && fn.Pkg.Pkg.Path() == "runtime" { - // Ignore "runtime" (except SetFinalizer): - // it has few interesting effects on aliasing - // and is full of unsafe code we can't analyze. - impl = ext۰NoEffect - } - - a.intrinsics[fn] = impl - } - return impl -} - -// isReflect reports whether fn belongs to the "reflect" package. -func (a *analysis) isReflect(fn *ssa.Function) bool { - if a.reflectValueObj == nil { - return false // "reflect" package not loaded - } - reflectPackage := a.reflectValueObj.Pkg() - if fn.Pkg != nil && fn.Pkg.Pkg == reflectPackage { - return true - } - // Synthetic wrappers have a nil Pkg, so they slip through the - // previous check. Check the receiver package. - // TODO(adonovan): should synthetic wrappers have a non-nil Pkg? - if recv := fn.Signature.Recv(); recv != nil { - if named, ok := deref(recv.Type()).(*types.Named); ok { - if named.Obj().Pkg() == reflectPackage { - return true // e.g. 
wrapper of (reflect.Value).f - } - } - } - return false -} - -// A trivial intrinsic suitable for any function that does not: -// 1) induce aliases between its arguments or any global variables; -// 2) call any functions; or -// 3) create any labels. -// -// Many intrinsics (such as CompareAndSwapInt32) have a fourth kind of -// effect: loading or storing through a pointer. Though these could -// be significant, we deliberately ignore them because they are -// generally not worth the effort. -// -// We sometimes violate condition #3 if the function creates only -// non-function labels, as the control-flow graph is still sound. -func ext۰NoEffect(a *analysis, cgn *cgnode) {} - -func ext۰NotYetImplemented(a *analysis, cgn *cgnode) { - fn := cgn.fn - a.warnf(fn.Pos(), "unsound: intrinsic treatment of %s not yet implemented", fn) -} - -// ---------- func runtime.SetFinalizer(x, f interface{}) ---------- - -// runtime.SetFinalizer(x, f) -type runtimeSetFinalizerConstraint struct { - targets nodeid // (indirect) - f nodeid // (ptr) - x nodeid -} - -func (c *runtimeSetFinalizerConstraint) ptr() nodeid { return c.f } -func (c *runtimeSetFinalizerConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.targets), "SetFinalizer.targets") -} -func (c *runtimeSetFinalizerConstraint) renumber(mapping []nodeid) { - c.targets = mapping[c.targets] - c.f = mapping[c.f] - c.x = mapping[c.x] -} - -func (c *runtimeSetFinalizerConstraint) String() string { - return fmt.Sprintf("runtime.SetFinalizer(n%d, n%d)", c.x, c.f) -} - -func (c *runtimeSetFinalizerConstraint) solve(a *analysis, delta *nodeset) { - for _, fObj := range delta.AppendTo(a.deltaSpace) { - tDyn, f, indirect := a.taggedValue(nodeid(fObj)) - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. - panic("indirect tagged object") - } - - tSig, ok := tDyn.Underlying().(*types.Signature) - if !ok { - continue // not a function - } - if tSig.Recv() != nil { - panic(tSig) - } - if tSig.Params().Len() != 1 { - continue // not a unary function - } - - // Extract x to tmp. - tx := tSig.Params().At(0).Type() - tmp := a.addNodes(tx, "SetFinalizer.tmp") - a.typeAssert(tx, tmp, c.x, false) - - // Call f(tmp). - a.store(f, tmp, 1, a.sizeof(tx)) - - // Add dynamic call target. - if a.onlineCopy(c.targets, f) { - a.addWork(c.targets) - } - } -} - -func ext۰runtime۰SetFinalizer(a *analysis, cgn *cgnode) { - // This is the shared contour, used for dynamic calls. 
- targets := a.addOneNode(tInvalid, "SetFinalizer.targets", nil) - cgn.sites = append(cgn.sites, &callsite{targets: targets}) - params := a.funcParams(cgn.obj) - a.addConstraint(&runtimeSetFinalizerConstraint{ - targets: targets, - x: params, - f: params + 1, - }) -} - -// ---------- func time.startTimer(t *runtimeTimer) ---------- - -// time.StartTimer(t) -type timeStartTimerConstraint struct { - targets nodeid // (indirect) - t nodeid // (ptr) -} - -func (c *timeStartTimerConstraint) ptr() nodeid { return c.t } -func (c *timeStartTimerConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.targets), "StartTimer.targets") -} -func (c *timeStartTimerConstraint) renumber(mapping []nodeid) { - c.targets = mapping[c.targets] - c.t = mapping[c.t] -} - -func (c *timeStartTimerConstraint) String() string { - return fmt.Sprintf("time.startTimer(n%d)", c.t) -} - -func (c *timeStartTimerConstraint) solve(a *analysis, delta *nodeset) { - for _, tObj := range delta.AppendTo(a.deltaSpace) { - t := nodeid(tObj) - - // We model startTimer as if it was defined thus: - // func startTimer(t *runtimeTimer) { t.f(t.arg) } - - // We hard-code the field offsets of time.runtimeTimer: - // type runtimeTimer struct { - // 0 __identity__ - // 1 i int32 - // 2 when int64 - // 3 period int64 - // 4 f func(int64, interface{}) - // 5 arg interface{} - // } - f := t + 4 - arg := t + 5 - - // store t.arg to t.f.params[0] - // (offset 1 => skip identity) - a.store(f, arg, 1, 1) - - // Add dynamic call target. - if a.onlineCopy(c.targets, f) { - a.addWork(c.targets) - } - } -} - -func ext۰time۰startTimer(a *analysis, cgn *cgnode) { - // This is the shared contour, used for dynamic calls. - targets := a.addOneNode(tInvalid, "startTimer.targets", nil) - cgn.sites = append(cgn.sites, &callsite{targets: targets}) - params := a.funcParams(cgn.obj) - a.addConstraint(&timeStartTimerConstraint{ - targets: targets, - t: params, - }) -} diff --git a/go/pointer/labels.go b/go/pointer/labels.go deleted file mode 100644 index 5a1e1999c9a..00000000000 --- a/go/pointer/labels.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pointer - -import ( - "fmt" - "go/token" - "go/types" - "strings" - - "golang.org/x/tools/go/ssa" -) - -// A Label is an entity that may be pointed to by a pointer, map, -// channel, 'func', slice or interface. -// -// Labels include: -// - functions -// - globals -// - tagged objects, representing interfaces and reflect.Values -// - arrays created by conversions (e.g. []byte("foo"), []byte(s)) -// - stack- and heap-allocated variables (including composite literals) -// - channels, maps and arrays created by make() -// - intrinsic or reflective operations that allocate (e.g. append, reflect.New) -// - intrinsic objects, e.g. the initial array behind os.Args. -// - and their subelements, e.g. "alloc.y[*].z" -// -// Labels are so varied that they defy good generalizations; -// some have no value, no callgraph node, or no position. -// Many objects have types that are inexpressible in Go: -// maps, channels, functions, tagged objects. -// -// At most one of Value() or ReflectType() may return non-nil. -type Label struct { - obj *object // the addressable memory location containing this label - subelement *fieldInfo // subelement path within obj, e.g. ".a.b[*].c" -} - -// Value returns the ssa.Value that allocated this label's object, if any. 
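To make the Label API just described concrete, here is a minimal sketch (not part of the deleted file; the printPointees helper is hypothetical, and prog and mainPkg are assumed to come from an SSA program already built via ssautil.AllPackages and prog.Build(), with imports "fmt", "golang.org/x/tools/go/pointer", and "golang.org/x/tools/go/ssa"):

	// printPointees reports the labels that the pointer-like ssa.Value v
	// may point to, using the same Config/AddQuery/Analyze/Queries API
	// that the tests later in this patch exercise.
	func printPointees(prog *ssa.Program, mainPkg *ssa.Package, v ssa.Value) error {
		if !pointer.CanPoint(v.Type()) {
			return fmt.Errorf("%s is not a pointer-like type", v.Type())
		}
		cfg := &pointer.Config{Mains: []*ssa.Package{mainPkg}}
		cfg.AddQuery(v)
		result, err := pointer.Analyze(cfg)
		if err != nil {
			return err
		}
		for _, l := range result.Queries[v].PointsTo().Labels() {
			fmt.Printf("%s: %s\n", prog.Fset.Position(l.Pos()), l)
		}
		return nil
	}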
-func (l Label) Value() ssa.Value {
-	val, _ := l.obj.data.(ssa.Value)
-	return val
-}
-
-// ReflectType returns the type represented by this label if it is a
-// reflect.rtype instance object or *reflect.rtype-tagged object.
-func (l Label) ReflectType() types.Type {
-	rtype, _ := l.obj.data.(types.Type)
-	return rtype
-}
-
-// Path returns the path to the subelement of the object containing
-// this label. For example, ".x[*].y".
-func (l Label) Path() string {
-	return l.subelement.path()
-}
-
-// Pos returns the position of this label, if known, zero otherwise.
-func (l Label) Pos() token.Pos {
-	switch data := l.obj.data.(type) {
-	case ssa.Value:
-		return data.Pos()
-	case types.Type:
-		if nt, ok := deref(data).(*types.Named); ok {
-			return nt.Obj().Pos()
-		}
-	}
-	if cgn := l.obj.cgn; cgn != nil {
-		return cgn.fn.Pos()
-	}
-	return token.NoPos
-}
-
-// String returns the printed form of this label.
-//
-// Examples:                                    Object type:
-//
-//	x                                       (a variable)
-//	(sync.Mutex).Lock                       (a function)
-//	convert                                 (array created by conversion)
-//	makemap                                 (map allocated via make)
-//	makechan                                (channel allocated via make)
-//	makeinterface                           (tagged object allocated by makeinterface)
-//	<alloc in reflect.Zero>                 (allocation in intrinsic)
-//	sync.Mutex                              (a reflect.rtype instance)
-//	<command-line arguments>                (an intrinsic object)
-//
-// Labels within compound objects have subelement paths:
-//
-//	x.y[*].z                                (a struct variable, x)
-//	append.y[*].z                           (array allocated by append)
-//	makeslice.y[*].z                        (array allocated via make)
-//
-// TODO(adonovan): expose func LabelString(*types.Package, Label).
-func (l Label) String() string {
-	var s string
-	switch v := l.obj.data.(type) {
-	case types.Type:
-		return v.String()
-
-	case string:
-		s = v // an intrinsic object (e.g. os.Args[*])
-
-	case nil:
-		if l.obj.cgn != nil {
-			// allocation by intrinsic or reflective operation
-			s = fmt.Sprintf("<alloc in %s>", l.obj.cgn.fn)
-		} else {
-			s = "<unknown>" // should be unreachable
-		}
-
-	case *ssa.Function:
-		s = v.String()
-
-	case *ssa.Global:
-		s = v.String()
-
-	case *ssa.Const:
-		s = v.Name()
-
-	case *ssa.Alloc:
-		s = v.Comment
-		if s == "" {
-			s = "alloc"
-		}
-
-	case *ssa.Call:
-		// Currently only calls to append can allocate objects.
-		if v.Call.Value.(*ssa.Builtin).Object().Name() != "append" {
-			panic("unhandled *ssa.Call label: " + v.Name())
-		}
-		s = "append"
-
-	case *ssa.MakeMap, *ssa.MakeChan, *ssa.MakeSlice, *ssa.Convert:
-		s = strings.ToLower(strings.TrimPrefix(fmt.Sprintf("%T", v), "*ssa."))
-
-	case *ssa.MakeInterface:
-		// MakeInterface is usually implicit in Go source (so
-		// Pos()==0), and tagged objects may be allocated
-		// synthetically (so no *MakeInterface data).
-		s = "makeinterface:" + v.X.Type().String()
-
-	default:
-		panic(fmt.Sprintf("unhandled object data type: %T", v))
-	}
-
-	return s + l.subelement.path()
-}
diff --git a/go/pointer/opt.go b/go/pointer/opt.go
deleted file mode 100644
index bbd411c2e03..00000000000
--- a/go/pointer/opt.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pointer
-
-// This file implements renumbering, a pre-solver optimization to
-// improve the efficiency of the solver's points-to set representation.
-// -// TODO(adonovan): rename file "renumber.go" - -import "fmt" - -// renumber permutes a.nodes so that all nodes within an addressable -// object appear before all non-addressable nodes, maintaining the -// order of nodes within the same object (as required by offsetAddr). -// -// renumber must update every nodeid in the analysis (constraints, -// Pointers, callgraph, etc) to reflect the new ordering. -// -// This is an optimisation to increase the locality and efficiency of -// sparse representations of points-to sets. (Typically only about -// 20% of nodes are within an object.) -// -// NB: nodes added during solving (e.g. for reflection, SetFinalizer) -// will be appended to the end. -// -// Renumbering makes the PTA log inscrutable. To aid debugging, later -// phases (e.g. HVN) must not rely on it having occurred. -func (a *analysis) renumber() { - if a.log != nil { - fmt.Fprintf(a.log, "\n\n==== Renumbering\n\n") - } - - N := nodeid(len(a.nodes)) - newNodes := make([]*node, N) - renumbering := make([]nodeid, N) // maps old to new - - var i, j nodeid - - // The zero node is special. - newNodes[j] = a.nodes[i] - renumbering[i] = j - i++ - j++ - - // Pass 1: object nodes. - for i < N { - obj := a.nodes[i].obj - if obj == nil { - i++ - continue - } - - end := i + nodeid(obj.size) - for i < end { - newNodes[j] = a.nodes[i] - renumbering[i] = j - i++ - j++ - } - } - nobj := j - - // Pass 2: non-object nodes. - for i = 1; i < N; { - obj := a.nodes[i].obj - if obj != nil { - i += nodeid(obj.size) - continue - } - - newNodes[j] = a.nodes[i] - renumbering[i] = j - i++ - j++ - } - - if j != N { - panic(fmt.Sprintf("internal error: j=%d, N=%d", j, N)) - } - - // Log the remapping table. - if a.log != nil { - fmt.Fprintf(a.log, "Renumbering nodes to improve density:\n") - fmt.Fprintf(a.log, "(%d object nodes of %d total)\n", nobj, N) - for old, new := range renumbering { - fmt.Fprintf(a.log, "\tn%d -> n%d\n", old, new) - } - } - - // Now renumber all existing nodeids to use the new node permutation. - // It is critical that all reachable nodeids are accounted for! - - // Renumber nodeids in queried Pointers. - for v, ptr := range a.result.Queries { - ptr.n = renumbering[ptr.n] - a.result.Queries[v] = ptr - } - for v, ptr := range a.result.IndirectQueries { - ptr.n = renumbering[ptr.n] - a.result.IndirectQueries[v] = ptr - } - for _, queries := range a.config.extendedQueries { - for _, query := range queries { - if query.ptr != nil { - query.ptr.n = renumbering[query.ptr.n] - } - } - } - - // Renumber nodeids in global objects. - for v, id := range a.globalobj { - a.globalobj[v] = renumbering[id] - } - - // Renumber nodeids in constraints. - for _, c := range a.constraints { - c.renumber(renumbering) - } - - // Renumber nodeids in the call graph. - for _, cgn := range a.cgnodes { - cgn.obj = renumbering[cgn.obj] - for _, site := range cgn.sites { - site.targets = renumbering[site.targets] - } - } - - a.nodes = newNodes -} diff --git a/go/pointer/pointer_go117_test.go b/go/pointer/pointer_go117_test.go deleted file mode 100644 index 83162aca325..00000000000 --- a/go/pointer/pointer_go117_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// No testdata on Android. 
- -//go:build !android && go1.17 -// +build !android,go1.17 - -package pointer_test - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "golang.org/x/tools/internal/testenv" -) - -func TestSliceToArrayPointer(t *testing.T) { - // Based on TestInput. Keep this up to date with that. - filename := "testdata/arrays_go117.go" - - if testing.Short() { - t.Skip("skipping in short mode; this test requires tons of memory; https://golang.org/issue/14113") - } - testenv.NeedsGoBuild(t) - - wd, err := os.Getwd() - if err != nil { - t.Fatalf("os.Getwd: %s", err) - } - fmt.Fprintf(os.Stderr, "Entering directory `%s'\n", wd) - - content, err := ioutil.ReadFile(filename) - if err != nil { - t.Fatalf("couldn't read file '%s': %s", filename, err) - } - fpath, err := filepath.Abs(filename) - if err != nil { - t.Errorf("couldn't get absolute path for '%s': %s", filename, err) - } - - if !doOneInput(t, string(content), fpath) { - t.Fail() - } -} diff --git a/go/pointer/pointer_race_test.go b/go/pointer/pointer_race_test.go deleted file mode 100644 index d3c9b475e25..00000000000 --- a/go/pointer/pointer_race_test.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build race -// +build race - -package pointer_test - -func init() { - raceEnabled = true -} diff --git a/go/pointer/pointer_test.go b/go/pointer/pointer_test.go deleted file mode 100644 index ffca57291e7..00000000000 --- a/go/pointer/pointer_test.go +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// No testdata on Android. - -//go:build !android -// +build !android - -package pointer_test - -// This test uses 'expectation' comments embedded within testdata/*.go -// files to specify the expected pointer analysis behaviour. -// See below for grammar. - -import ( - "bytes" - "errors" - "fmt" - "go/token" - "go/types" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - "testing" - "unsafe" - - "golang.org/x/tools/go/callgraph" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/go/pointer" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/ssa/ssautil" - "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/testenv" - "golang.org/x/tools/internal/typeparams" -) - -var inputs = []string{ - "testdata/a_test.go", - "testdata/another.go", - "testdata/arrayreflect.go", - "testdata/arrays.go", - "testdata/channels.go", - "testdata/chanreflect.go", - "testdata/context.go", - "testdata/conv.go", - "testdata/extended.go", - "testdata/finalizer.go", - "testdata/flow.go", - "testdata/fmtexcerpt.go", - "testdata/func.go", - "testdata/funcreflect.go", - "testdata/hello.go", // NB: causes spurious failure of HVN cross-check - "testdata/interfaces.go", - "testdata/issue9002.go", - "testdata/mapreflect.go", - "testdata/maps.go", - "testdata/panic.go", - "testdata/recur.go", - "testdata/reflect.go", - "testdata/rtti.go", - "testdata/structreflect.go", - "testdata/structs.go", - // "testdata/timer.go", // TODO(adonovan): fix broken assumptions about runtime timers -} - -var raceEnabled = false - -// Expectation grammar: -// -// @calls f -> g -// -// A 'calls' expectation asserts that edge (f, g) appears in the -// callgraph. f and g are notated as per Function.String(), which -// may contain spaces (e.g. 
promoted method in anon struct). -// -// @pointsto a | b | c -// -// A 'pointsto' expectation asserts that the points-to set of its -// operand contains exactly the set of labels {a,b,c} notated as per -// labelString. -// -// A 'pointsto' expectation must appear on the same line as a -// print(x) statement; the expectation's operand is x. -// -// If one of the strings is "...", the expectation asserts that the -// points-to set at least the other labels. -// -// We use '|' because label names may contain spaces, e.g. methods -// of anonymous structs. -// -// Assertions within generic functions are treated as a union of all -// of the instantiations. -// -// From a theoretical perspective, concrete types in interfaces are -// labels too, but they are represented differently and so have a -// different expectation, @types, below. -// -// @types t | u | v -// -// A 'types' expectation asserts that the set of possible dynamic -// types of its interface operand is exactly {t,u,v}, notated per -// go/types.Type.String(). In other words, it asserts that the type -// component of the interface may point to that set of concrete type -// literals. It also works for reflect.Value, though the types -// needn't be concrete in that case. -// -// A 'types' expectation must appear on the same line as a -// print(x) statement; the expectation's operand is x. -// -// If one of the strings is "...", the expectation asserts that the -// interface's type may point to at least the other types. -// -// We use '|' because type names may contain spaces. -// -// Assertions within generic functions are treated as a union of all -// of the instantiations. -// -// @warning "regexp" -// -// A 'warning' expectation asserts that the analysis issues a -// warning that matches the regular expression within the string -// literal. -// -// @line id -// -// A line directive associates the name "id" with the current -// file:line. The string form of labels will use this id instead of -// a file:line, making @pointsto expectations more robust against -// perturbations in the source file. -// (NB, anon functions still include line numbers.) -type expectation struct { - kind string // "pointsto" | "pointstoquery" | "types" | "calls" | "warning" - filepath string - linenum int // source line number, 1-based - args []string - query string // extended query - extended []*pointer.Pointer // extended query pointer [per instantiation] - types []types.Type // for types -} - -func (e *expectation) String() string { - return fmt.Sprintf("@%s[%s]", e.kind, strings.Join(e.args, " | ")) -} - -func (e *expectation) errorf(format string, args ...interface{}) { - fmt.Printf("%s:%d: ", e.filepath, e.linenum) - fmt.Printf(format, args...) - fmt.Println() -} - -func (e *expectation) needsProbe() bool { - return e.kind == "pointsto" || e.kind == "pointstoquery" || e.kind == "types" -} - -// Find probes (call to print(x)) of same source file/line as expectation. -// -// May match multiple calls for different instantiations. -func findProbes(prog *ssa.Program, probes map[*ssa.CallCommon]bool, e *expectation) []*ssa.CallCommon { - var calls []*ssa.CallCommon - for call := range probes { - pos := prog.Fset.Position(call.Pos()) - if pos.Line == e.linenum && pos.Filename == e.filepath { - // TODO(adonovan): send this to test log (display only on failure). 
- // fmt.Printf("%s:%d: info: found probe for %s: %s\n", - // e.filepath, e.linenum, e, p.arg0) // debugging - calls = append(calls, call) - } - } - return calls -} - -// Find points to sets of probes (call to print(x)). -func probesPointTo(calls []*ssa.CallCommon, queries map[ssa.Value]pointer.Pointer) []pointer.PointsToSet { - ptss := make([]pointer.PointsToSet, len(calls)) - for i, call := range calls { - ptss[i] = queries[call.Args[0]].PointsTo() - } - return ptss -} - -// Find the types of the probes (call to print(x)). -// Returns an error if type of the probe cannot point. -func probesPointToTypes(calls []*ssa.CallCommon) ([]types.Type, error) { - tProbes := make([]types.Type, len(calls)) - for i, call := range calls { - tProbes[i] = call.Args[0].Type() - if !pointer.CanPoint(tProbes[i]) { - return nil, fmt.Errorf("expectation on non-pointerlike operand: %s", tProbes[i]) - } - } - return tProbes, nil -} - -func doOneInput(t *testing.T, input, fpath string) bool { - cfg := &packages.Config{ - Mode: packages.LoadAllSyntax, - Tests: true, - } - pkgs, err := packages.Load(cfg, fpath) - if err != nil { - fmt.Println(err) - return false - } - if packages.PrintErrors(pkgs) > 0 { - fmt.Println("loaded packages have errors") - return false - } - - // SSA creation + building. - mode := ssa.SanityCheckFunctions | ssa.InstantiateGenerics - prog, ssaPkgs := ssautil.AllPackages(pkgs, mode) - prog.Build() - - // main underlying packages.Package. - mainPpkg := pkgs[0] - mainpkg := ssaPkgs[0] - ptrmain := mainpkg // main package for the pointer analysis - if mainpkg.Func("main") == nil { - // For test programs without main, such as testdata/a_test.go, - // the package with the original code is "main [main.test]" and - // the package with the main is "main.test". - for i, pkg := range pkgs { - if pkg.ID == mainPpkg.ID+".test" { - ptrmain = ssaPkgs[i] - } else if pkg.ID == fmt.Sprintf("%s [%s.test]", mainPpkg.ID, mainPpkg.ID) { - mainpkg = ssaPkgs[i] - } - } - } - - // files in mainPpkg. - mainFiles := make(map[*token.File]bool) - for _, syn := range mainPpkg.Syntax { - mainFiles[prog.Fset.File(syn.Pos())] = true - } - - // Find all calls to the built-in print(x). Analytically, - // print is a no-op, but it's a convenient hook for testing - // the PTS of an expression, so our tests use it. - // Exclude generic bodies as these should be dead code for pointer. - // Instance of generics are included. - probes := make(map[*ssa.CallCommon]bool) - for fn := range ssautil.AllFunctions(prog) { - if isGenericBody(fn) { - continue // skip generic bodies - } - // TODO(taking): Switch to a more principled check like fn.declaredPackage() == mainPkg if Origin is exported. - if fn.Pkg == mainpkg || (fn.Pkg == nil && mainFiles[prog.Fset.File(fn.Pos())]) { - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - if instr, ok := instr.(ssa.CallInstruction); ok { - call := instr.Common() - if b, ok := call.Value.(*ssa.Builtin); ok && b.Name() == "print" && len(call.Args) == 1 { - probes[instr.Common()] = true - } - } - } - } - } - } - - ok := true - - lineMapping := make(map[string]string) // maps "file:line" to @line tag - - // Parse expectations in this input. 
- var exps []*expectation - re := regexp.MustCompile("// *@([a-z]*) *(.*)$") - lines := strings.Split(input, "\n") - for linenum, line := range lines { - linenum++ // make it 1-based - if matches := re.FindAllStringSubmatch(line, -1); matches != nil { - match := matches[0] - kind, rest := match[1], match[2] - e := &expectation{kind: kind, filepath: fpath, linenum: linenum} - - if kind == "line" { - if rest == "" { - ok = false - e.errorf("@%s expectation requires identifier", kind) - } else { - lineMapping[fmt.Sprintf("%s:%d", fpath, linenum)] = rest - } - continue - } - - if e.needsProbe() && !strings.Contains(line, "print(") { - ok = false - e.errorf("@%s expectation must follow call to print(x)", kind) - continue - } - - switch kind { - case "pointsto": - e.args = split(rest, "|") - - case "pointstoquery": - args := strings.SplitN(rest, " ", 2) - e.query = args[0] - e.args = split(args[1], "|") - case "types": - for _, typstr := range split(rest, "|") { - var t types.Type = types.Typ[types.Invalid] // means "..." - if typstr != "..." { - tv, err := types.Eval(prog.Fset, mainpkg.Pkg, mainPpkg.Syntax[0].Pos(), typstr) - if err != nil { - ok = false - // Don't print err since its location is bad. - e.errorf("'%s' is not a valid type: %s", typstr, err) - continue - } - t = tv.Type - } - e.types = append(e.types, t) - } - - case "calls": - e.args = split(rest, "->") - // TODO(adonovan): eagerly reject the - // expectation if fn doesn't denote - // existing function, rather than fail - // the expectation after analysis. - if len(e.args) != 2 { - ok = false - e.errorf("@calls expectation wants 'caller -> callee' arguments") - continue - } - - case "warning": - lit, err := strconv.Unquote(strings.TrimSpace(rest)) - if err != nil { - ok = false - e.errorf("couldn't parse @warning operand: %s", err.Error()) - continue - } - e.args = append(e.args, lit) - - default: - ok = false - e.errorf("unknown expectation kind: %s", e) - continue - } - exps = append(exps, e) - } - } - - var log bytes.Buffer - fmt.Fprintf(&log, "Input: %s\n", fpath) - - // Run the analysis. - config := &pointer.Config{ - Reflection: true, - BuildCallGraph: true, - Mains: []*ssa.Package{ptrmain}, - Log: &log, - } - for probe := range probes { - v := probe.Args[0] - pos := prog.Fset.Position(probe.Pos()) - for _, e := range exps { - if e.linenum == pos.Line && e.filepath == pos.Filename && e.kind == "pointstoquery" { - extended, err := config.AddExtendedQuery(v, e.query) - if err != nil { - panic(err) - } - e.extended = append(e.extended, extended) - } - } - if pointer.CanPoint(v.Type()) { - config.AddQuery(v) - } - } - - // Print the log is there was an error or a panic. - complete := false - defer func() { - if !complete || !ok { - log.WriteTo(os.Stderr) - } - }() - - result, err := pointer.Analyze(config) - if err != nil { - panic(err) // internal error in pointer analysis - } - - // Check the expectations. 
- for _, e := range exps { - var tProbes []types.Type - var calls []*ssa.CallCommon - var ptss []pointer.PointsToSet - if e.needsProbe() { - calls = findProbes(prog, probes, e) - if len(calls) == 0 { - ok = false - e.errorf("unreachable print() statement has expectation %s", e) - continue - } - if e.extended == nil { - ptss = probesPointTo(calls, result.Queries) - } else { - ptss = make([]pointer.PointsToSet, len(e.extended)) - for i, p := range e.extended { - ptss[i] = p.PointsTo() - } - } - - var err error - tProbes, err = probesPointToTypes(calls) - if err != nil { - ok = false - e.errorf(err.Error()) - continue - } - } - - switch e.kind { - case "pointsto", "pointstoquery": - if !checkPointsToExpectation(e, ptss, lineMapping, prog) { - ok = false - } - - case "types": - if !checkTypesExpectation(e, ptss, tProbes) { - ok = false - } - - case "calls": - if !checkCallsExpectation(prog, e, result.CallGraph) { - ok = false - } - - case "warning": - if !checkWarningExpectation(prog, e, result.Warnings) { - ok = false - } - } - } - - complete = true - - // ok = false // debugging: uncomment to always see log - - return ok -} - -func labelString(l *pointer.Label, lineMapping map[string]string, prog *ssa.Program) string { - // Functions and Globals need no pos suffix, - // nor do allocations in intrinsic operations - // (for which we'll print the function name). - switch l.Value().(type) { - case nil, *ssa.Function, *ssa.Global: - return l.String() - } - - str := l.String() - if pos := l.Pos(); pos != token.NoPos { - // Append the position, using a @line tag instead of a line number, if defined. - posn := prog.Fset.Position(pos) - s := fmt.Sprintf("%s:%d", posn.Filename, posn.Line) - if tag, ok := lineMapping[s]; ok { - return fmt.Sprintf("%s@%s:%d", str, tag, posn.Column) - } - str = fmt.Sprintf("%s@%s", str, posn) - } - return str -} - -func checkPointsToExpectation(e *expectation, ptss []pointer.PointsToSet, lineMapping map[string]string, prog *ssa.Program) bool { - expected := make(map[string]int) - surplus := make(map[string]int) - exact := true - for _, g := range e.args { - if g == "..." { - exact = false - continue - } - expected[g]++ - } - // Find the set of labels that the probe's - // argument (x in print(x)) may point to. - for _, pts := range ptss { // treat ptss as union of points-to sets. - for _, label := range pts.Labels() { - name := labelString(label, lineMapping, prog) - if expected[name] > 0 { - expected[name]-- - } else if exact { - surplus[name]++ - } - } - } - // Report multiset difference: - ok := true - for _, count := range expected { - if count > 0 { - ok = false - e.errorf("value does not alias these expected labels: %s", join(expected)) - break - } - } - for _, count := range surplus { - if count > 0 { - ok = false - e.errorf("value may additionally alias these labels: %s", join(surplus)) - break - } - } - return ok -} - -func checkTypesExpectation(e *expectation, ptss []pointer.PointsToSet, typs []types.Type) bool { - var expected typeutil.Map - var surplus typeutil.Map - exact := true - for _, g := range e.types { - if g == types.Typ[types.Invalid] { - exact = false - continue - } - expected.Set(g, struct{}{}) - } - - if len(typs) != len(ptss) { - e.errorf("@types expectation internal error differing number of types(%d) and points to sets (%d)", len(typs), len(ptss)) - return false - } - - // Find the set of types that the probe's - // argument (x in print(x)) may contain. 
- for i := range ptss { - var Ts []types.Type - if pointer.CanHaveDynamicTypes(typs[i]) { - Ts = ptss[i].DynamicTypes().Keys() - } else { - Ts = append(Ts, typs[i]) // static type - } - for _, T := range Ts { - if expected.At(T) != nil { - expected.Delete(T) - } else if exact { - surplus.Set(T, struct{}{}) - } - } - } - // Report set difference: - ok := true - if expected.Len() > 0 { - ok = false - e.errorf("interface cannot contain these types: %s", expected.KeysString()) - } - if surplus.Len() > 0 { - ok = false - e.errorf("interface may additionally contain these types: %s", surplus.KeysString()) - } - return ok -} - -var errOK = errors.New("OK") - -func checkCallsExpectation(prog *ssa.Program, e *expectation, cg *callgraph.Graph) bool { - found := make(map[string]int) - err := callgraph.GraphVisitEdges(cg, func(edge *callgraph.Edge) error { - // Name-based matching is inefficient but it allows us to - // match functions whose names that would not appear in an - // index ("") or which are not unique ("func@1.2"). - if edge.Caller.Func.String() == e.args[0] { - calleeStr := edge.Callee.Func.String() - if calleeStr == e.args[1] { - return errOK // expectation satisfied; stop the search - } - found[calleeStr]++ - } - return nil - }) - if err == errOK { - return true - } - if len(found) == 0 { - e.errorf("didn't find any calls from %s", e.args[0]) - } - e.errorf("found no call from %s to %s, but only to %s", - e.args[0], e.args[1], join(found)) - return false -} - -func checkWarningExpectation(prog *ssa.Program, e *expectation, warnings []pointer.Warning) bool { - // TODO(adonovan): check the position part of the warning too? - re, err := regexp.Compile(e.args[0]) - if err != nil { - e.errorf("invalid regular expression in @warning expectation: %s", err.Error()) - return false - } - - if len(warnings) == 0 { - e.errorf("@warning %q expectation, but no warnings", e.args[0]) - return false - } - - for _, w := range warnings { - if re.MatchString(w.Message) { - return true - } - } - - e.errorf("@warning %q expectation not satisfied; found these warnings though:", e.args[0]) - for _, w := range warnings { - fmt.Printf("%s: warning: %s\n", prog.Fset.Position(w.Pos), w.Message) - } - return false -} - -func TestInput(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode; this test requires tons of memory; https://golang.org/issue/14113") - } - testenv.NeedsGoBuild(t) - - wd, err := os.Getwd() - if err != nil { - t.Errorf("os.Getwd: %s", err) - return - } - - // 'go test' does a chdir so that relative paths in - // diagnostics no longer make sense relative to the invoking - // shell's cwd. We print a special marker so that Emacs can - // make sense of them. - fmt.Fprintf(os.Stderr, "Entering directory `%s'\n", wd) - - for _, filename := range inputs { - filename := filename - t.Run(filename, func(t *testing.T) { - if filename == "testdata/a_test.go" { - // For some reason this particular file is way more expensive than the others. 
- if unsafe.Sizeof(unsafe.Pointer(nil)) <= 4 { - t.Skip("skipping memory-intensive test on platform with small address space; https://golang.org/issue/14113") - } - if raceEnabled { - t.Skip("skipping memory-intensive test under race detector; https://golang.org/issue/14113") - } - } else { - t.Parallel() - } - - content, err := ioutil.ReadFile(filename) - if err != nil { - t.Fatalf("couldn't read file '%s': %s", filename, err) - } - - fpath, err := filepath.Abs(filename) - if err != nil { - t.Fatalf("couldn't get absolute path for '%s': %s", filename, err) - } - - if !doOneInput(t, string(content), fpath) { - t.Fail() - } - }) - } -} - -// isGenericBody returns true if fn is the body of a generic function. -func isGenericBody(fn *ssa.Function) bool { - sig := fn.Signature - if typeparams.ForSignature(sig).Len() > 0 || typeparams.RecvTypeParams(sig).Len() > 0 { - return fn.Synthetic == "" - } - return false -} - -// join joins the elements of multiset with " | "s. -func join(set map[string]int) string { - var buf bytes.Buffer - sep := "" - for name, count := range set { - for i := 0; i < count; i++ { - buf.WriteString(sep) - sep = " | " - buf.WriteString(name) - } - } - return buf.String() -} - -// split returns the list of sep-delimited non-empty strings in s. -func split(s, sep string) (r []string) { - for _, elem := range strings.Split(s, sep) { - elem = strings.TrimSpace(elem) - if elem != "" { - r = append(r, elem) - } - } - return -} - -func TestTypeParam(t *testing.T) { - if !typeparams.Enabled { - t.Skip("TestTypeParamInput requires type parameters") - } - // Based on TestInput. Keep this up to date with that. - filename := "testdata/typeparams.go" - - if testing.Short() { - t.Skip("skipping in short mode; this test requires tons of memory; https://golang.org/issue/14113") - } - testenv.NeedsGoBuild(t) - - wd, err := os.Getwd() - if err != nil { - t.Fatalf("os.Getwd: %s", err) - } - fmt.Fprintf(os.Stderr, "Entering directory `%s'\n", wd) - - content, err := ioutil.ReadFile(filename) - if err != nil { - t.Fatalf("couldn't read file '%s': %s", filename, err) - } - fpath, err := filepath.Abs(filename) - if err != nil { - t.Errorf("couldn't get absolute path for '%s': %s", filename, err) - } - - if !doOneInput(t, string(content), fpath) { - t.Fail() - } -} diff --git a/go/pointer/print.go b/go/pointer/print.go deleted file mode 100644 index 4f2f4c7ae12..00000000000 --- a/go/pointer/print.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pointer - -import "fmt" - -func (c *addrConstraint) String() string { - return fmt.Sprintf("addr n%d <- {&n%d}", c.dst, c.src) -} - -func (c *copyConstraint) String() string { - return fmt.Sprintf("copy n%d <- n%d", c.dst, c.src) -} - -func (c *loadConstraint) String() string { - return fmt.Sprintf("load n%d <- n%d[%d]", c.dst, c.src, c.offset) -} - -func (c *storeConstraint) String() string { - return fmt.Sprintf("store n%d[%d] <- n%d", c.dst, c.offset, c.src) -} - -func (c *offsetAddrConstraint) String() string { - return fmt.Sprintf("offsetAddr n%d <- n%d.#%d", c.dst, c.src, c.offset) -} - -func (c *typeFilterConstraint) String() string { - return fmt.Sprintf("typeFilter n%d <- n%d.(%s)", c.dst, c.src, c.typ) -} - -func (c *untagConstraint) String() string { - return fmt.Sprintf("untag n%d <- n%d.(%s)", c.dst, c.src, c.typ) -} - -func (c *invokeConstraint) String() string { - return fmt.Sprintf("invoke n%d.%s(n%d ...)", c.iface, c.method.Name(), c.params) -} - -func (n nodeid) String() string { - return fmt.Sprintf("n%d", n) -} diff --git a/go/pointer/query.go b/go/pointer/query.go deleted file mode 100644 index 58aa868b079..00000000000 --- a/go/pointer/query.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pointer - -import ( - "errors" - "fmt" - "go/ast" - "go/parser" - "go/token" - "go/types" - "strconv" -) - -// An extendedQuery represents a sequence of destructuring operations -// applied to an ssa.Value (denoted by "x"). -type extendedQuery struct { - ops []interface{} - ptr *Pointer -} - -// indexValue returns the value of an integer literal used as an -// index. -func indexValue(expr ast.Expr) (int, error) { - lit, ok := expr.(*ast.BasicLit) - if !ok { - return 0, fmt.Errorf("non-integer index (%T)", expr) - } - if lit.Kind != token.INT { - return 0, fmt.Errorf("non-integer index %s", lit.Value) - } - return strconv.Atoi(lit.Value) -} - -// parseExtendedQuery parses and validates a destructuring Go -// expression and returns the sequence of destructuring operations. -// See parseDestructuringExpr for details. -func parseExtendedQuery(typ types.Type, query string) ([]interface{}, types.Type, error) { - expr, err := parser.ParseExpr(query) - if err != nil { - return nil, nil, err - } - ops, typ, err := destructuringOps(typ, expr) - if err != nil { - return nil, nil, err - } - if len(ops) == 0 { - return nil, nil, errors.New("invalid query: must not be empty") - } - if ops[0] != "x" { - return nil, nil, fmt.Errorf("invalid query: query operand must be named x") - } - if !CanPoint(typ) { - return nil, nil, fmt.Errorf("query does not describe a pointer-like value: %s", typ) - } - return ops, typ, nil -} - -// destructuringOps parses a Go expression consisting only of an -// identifier "x", field selections, indexing, channel receives, load -// operations and parens---for example: "<-(*x[i])[key]"--- and -// returns the sequence of destructuring operations on x. 
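As a worked illustration of the destructuring just described (the operand must be named x; its type and the concrete query below are assumed for the example, not taken from the original tests):

	// Given a query operand declared as
	//	var x []*map[string]chan *int
	// the extended query
	//	<-(*x[0])["k"]
	// destructures, innermost expression first, into the op sequence
	//	"x", "sliceelem", "load", "mapelem", "recv"
	// and denotes a *int, which is pointer-like, so parseExtendedQuery
	// accepts it.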
-func destructuringOps(typ types.Type, expr ast.Expr) ([]interface{}, types.Type, error) { - switch expr := expr.(type) { - case *ast.SelectorExpr: - out, typ, err := destructuringOps(typ, expr.X) - if err != nil { - return nil, nil, err - } - - var structT *types.Struct - switch typ := typ.Underlying().(type) { - case *types.Pointer: - var ok bool - structT, ok = typ.Elem().Underlying().(*types.Struct) - if !ok { - return nil, nil, fmt.Errorf("cannot access field %s of pointer to type %s", expr.Sel.Name, typ.Elem()) - } - - out = append(out, "load") - case *types.Struct: - structT = typ - default: - return nil, nil, fmt.Errorf("cannot access field %s of type %s", expr.Sel.Name, typ) - } - - for i := 0; i < structT.NumFields(); i++ { - field := structT.Field(i) - if field.Name() == expr.Sel.Name { - out = append(out, "field", i) - return out, field.Type().Underlying(), nil - } - } - // TODO(dh): supporting embedding would need something like - // types.LookupFieldOrMethod, but without taking package - // boundaries into account, because we may want to access - // unexported fields. If we were only interested in one level - // of unexported name, we could determine the appropriate - // package and run LookupFieldOrMethod with that. However, a - // single query may want to cross multiple package boundaries, - // and at this point it's not really worth the complexity. - return nil, nil, fmt.Errorf("no field %s in %s (embedded fields must be resolved manually)", expr.Sel.Name, structT) - case *ast.Ident: - return []interface{}{expr.Name}, typ, nil - case *ast.BasicLit: - return []interface{}{expr.Value}, nil, nil - case *ast.IndexExpr: - out, typ, err := destructuringOps(typ, expr.X) - if err != nil { - return nil, nil, err - } - switch typ := typ.Underlying().(type) { - case *types.Array: - out = append(out, "arrayelem") - return out, typ.Elem().Underlying(), nil - case *types.Slice: - out = append(out, "sliceelem") - return out, typ.Elem().Underlying(), nil - case *types.Map: - out = append(out, "mapelem") - return out, typ.Elem().Underlying(), nil - case *types.Tuple: - out = append(out, "index") - idx, err := indexValue(expr.Index) - if err != nil { - return nil, nil, err - } - out = append(out, idx) - if idx >= typ.Len() || idx < 0 { - return nil, nil, fmt.Errorf("tuple index %d out of bounds", idx) - } - return out, typ.At(idx).Type().Underlying(), nil - default: - return nil, nil, fmt.Errorf("cannot index type %s", typ) - } - - case *ast.UnaryExpr: - if expr.Op != token.ARROW { - return nil, nil, fmt.Errorf("unsupported unary operator %s", expr.Op) - } - out, typ, err := destructuringOps(typ, expr.X) - if err != nil { - return nil, nil, err - } - ch, ok := typ.(*types.Chan) - if !ok { - return nil, nil, fmt.Errorf("cannot receive from value of type %s", typ) - } - out = append(out, "recv") - return out, ch.Elem().Underlying(), err - case *ast.ParenExpr: - return destructuringOps(typ, expr.X) - case *ast.StarExpr: - out, typ, err := destructuringOps(typ, expr.X) - if err != nil { - return nil, nil, err - } - ptr, ok := typ.(*types.Pointer) - if !ok { - return nil, nil, fmt.Errorf("cannot dereference type %s", typ) - } - out = append(out, "load") - return out, ptr.Elem().Underlying(), err - default: - return nil, nil, fmt.Errorf("unsupported expression %T", expr) - } -} - -func (a *analysis) evalExtendedQuery(t types.Type, id nodeid, ops []interface{}) (types.Type, nodeid) { - pid := id - // TODO(dh): we're allocating intermediary nodes each time - // evalExtendedQuery is called. 
We should probably only generate - // them once per (v, ops) pair. - for i := 1; i < len(ops); i++ { - var nid nodeid - switch ops[i] { - case "recv": - t = t.(*types.Chan).Elem().Underlying() - nid = a.addNodes(t, "query.extended") - a.load(nid, pid, 0, a.sizeof(t)) - case "field": - i++ // fetch field index - tt := t.(*types.Struct) - idx := ops[i].(int) - offset := a.offsetOf(t, idx) - t = tt.Field(idx).Type().Underlying() - nid = a.addNodes(t, "query.extended") - a.copy(nid, pid+nodeid(offset), a.sizeof(t)) - case "arrayelem": - t = t.(*types.Array).Elem().Underlying() - nid = a.addNodes(t, "query.extended") - a.copy(nid, 1+pid, a.sizeof(t)) - case "sliceelem": - t = t.(*types.Slice).Elem().Underlying() - nid = a.addNodes(t, "query.extended") - a.load(nid, pid, 1, a.sizeof(t)) - case "mapelem": - tt := t.(*types.Map) - t = tt.Elem() - ksize := a.sizeof(tt.Key()) - vsize := a.sizeof(tt.Elem()) - nid = a.addNodes(t, "query.extended") - a.load(nid, pid, ksize, vsize) - case "index": - i++ // fetch index - tt := t.(*types.Tuple) - idx := ops[i].(int) - t = tt.At(idx).Type().Underlying() - nid = a.addNodes(t, "query.extended") - a.copy(nid, pid+nodeid(idx), a.sizeof(t)) - case "load": - t = t.(*types.Pointer).Elem().Underlying() - nid = a.addNodes(t, "query.extended") - a.load(nid, pid, 0, a.sizeof(t)) - default: - // shouldn't happen - panic(fmt.Sprintf("unknown op %q", ops[i])) - } - pid = nid - } - - return t, pid -} diff --git a/go/pointer/query_test.go b/go/pointer/query_test.go deleted file mode 100644 index 4a3112a1f13..00000000000 --- a/go/pointer/query_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pointer - -import ( - "reflect" - "testing" - - "golang.org/x/tools/go/loader" -) - -func TestParseExtendedQuery(t *testing.T) { - const myprog = ` -package pkg - -import "reflect" - -type T []*int - -var V1 *int -var V2 **int -var V3 []*int -var V4 chan []*int -var V5 struct {F1, F2 chan *int} -var V6 [1]chan *int -var V7 int -var V8 T -var V9 reflect.Value -` - tests := []struct { - in string - out []interface{} - v string - valid bool - }{ - {`x`, []interface{}{"x"}, "V1", true}, - {`x`, []interface{}{"x"}, "V9", true}, - {`*x`, []interface{}{"x", "load"}, "V2", true}, - {`x[0]`, []interface{}{"x", "sliceelem"}, "V3", true}, - {`x[0]`, []interface{}{"x", "sliceelem"}, "V8", true}, - {`<-x`, []interface{}{"x", "recv"}, "V4", true}, - {`(<-x)[0]`, []interface{}{"x", "recv", "sliceelem"}, "V4", true}, - {`<-x.F2`, []interface{}{"x", "field", 1, "recv"}, "V5", true}, - {`<-x[0]`, []interface{}{"x", "arrayelem", "recv"}, "V6", true}, - {`x`, nil, "V7", false}, - {`y`, nil, "V1", false}, - {`x; x`, nil, "V1", false}, - {`x()`, nil, "V1", false}, - {`close(x)`, nil, "V1", false}, - } - - var conf loader.Config - f, err := conf.ParseFile("file.go", myprog) - if err != nil { - t.Fatal(err) - } - conf.CreateFromFiles("main", f) - lprog, err := conf.Load() - if err != nil { - t.Fatal(err) - } - pkg := lprog.Created[0].Pkg - - for _, test := range tests { - typ := pkg.Scope().Lookup(test.v).Type() - ops, _, err := parseExtendedQuery(typ, test.in) - if test.valid && err != nil { - t.Errorf("parseExtendedQuery(%q) = %s, expected no error", test.in, err) - } - if !test.valid && err == nil { - t.Errorf("parseExtendedQuery(%q) succeeded, expected error", test.in) - } - - if !reflect.DeepEqual(ops, test.out) { - t.Errorf("parseExtendedQuery(%q) = %#v, want %#v", test.in, ops, test.out) - } - } -} diff --git a/go/pointer/reflect.go b/go/pointer/reflect.go deleted file mode 100644 index 3762dd8d401..00000000000 --- a/go/pointer/reflect.go +++ /dev/null @@ -1,1973 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pointer - -// This file implements the generation and resolution rules for -// constraints arising from the use of reflection in the target -// program. See doc.go for explanation of the representation. -// -// For consistency, the names of all parameters match those of the -// actual functions in the "reflect" package. -// -// To avoid proliferation of equivalent labels, intrinsics should -// memoize as much as possible, like TypeOf and Zero do for their -// tagged objects. -// -// TODO(adonovan): this file is rather subtle. Explain how we derive -// the implementation of each reflect operator from its spec, -// including the subtleties of reflect.flag{Addr,RO,Indir}. -// [Hint: our implementation is as if reflect.flagIndir was always -// true, i.e. reflect.Values are pointers to tagged objects, there is -// no inline allocation optimization; and indirect tagged objects (not -// yet implemented) correspond to reflect.Values with -// reflect.flagAddr.] -// A picture would help too. -// -// TODO(adonovan): try factoring up the common parts of the majority of -// these constraints that are single input, single output. - -import ( - "fmt" - "go/constant" - "go/types" - "reflect" - - "golang.org/x/tools/go/ssa" -) - -func init() { - for name, fn := range map[string]intrinsic{ - // reflect.Value methods. 
- "(reflect.Value).Addr": ext۰reflect۰Value۰Addr, - "(reflect.Value).Bool": ext۰NoEffect, - "(reflect.Value).Bytes": ext۰reflect۰Value۰Bytes, - "(reflect.Value).Call": ext۰reflect۰Value۰Call, - "(reflect.Value).CallSlice": ext۰reflect۰Value۰CallSlice, - "(reflect.Value).CanAddr": ext۰NoEffect, - "(reflect.Value).CanInterface": ext۰NoEffect, - "(reflect.Value).CanSet": ext۰NoEffect, - "(reflect.Value).Cap": ext۰NoEffect, - "(reflect.Value).Close": ext۰NoEffect, - "(reflect.Value).Complex": ext۰NoEffect, - "(reflect.Value).Convert": ext۰reflect۰Value۰Convert, - "(reflect.Value).Elem": ext۰reflect۰Value۰Elem, - "(reflect.Value).Field": ext۰reflect۰Value۰Field, - "(reflect.Value).FieldByIndex": ext۰reflect۰Value۰FieldByIndex, - "(reflect.Value).FieldByName": ext۰reflect۰Value۰FieldByName, - "(reflect.Value).FieldByNameFunc": ext۰reflect۰Value۰FieldByNameFunc, - "(reflect.Value).Float": ext۰NoEffect, - "(reflect.Value).Index": ext۰reflect۰Value۰Index, - "(reflect.Value).Int": ext۰NoEffect, - "(reflect.Value).Interface": ext۰reflect۰Value۰Interface, - "(reflect.Value).InterfaceData": ext۰NoEffect, - "(reflect.Value).IsNil": ext۰NoEffect, - "(reflect.Value).IsValid": ext۰NoEffect, - "(reflect.Value).Kind": ext۰NoEffect, - "(reflect.Value).Len": ext۰NoEffect, - "(reflect.Value).MapIndex": ext۰reflect۰Value۰MapIndex, - "(reflect.Value).MapKeys": ext۰reflect۰Value۰MapKeys, - "(reflect.Value).Method": ext۰reflect۰Value۰Method, - "(reflect.Value).MethodByName": ext۰reflect۰Value۰MethodByName, - "(reflect.Value).NumField": ext۰NoEffect, - "(reflect.Value).NumMethod": ext۰NoEffect, - "(reflect.Value).OverflowComplex": ext۰NoEffect, - "(reflect.Value).OverflowFloat": ext۰NoEffect, - "(reflect.Value).OverflowInt": ext۰NoEffect, - "(reflect.Value).OverflowUint": ext۰NoEffect, - "(reflect.Value).Pointer": ext۰NoEffect, - "(reflect.Value).Recv": ext۰reflect۰Value۰Recv, - "(reflect.Value).Send": ext۰reflect۰Value۰Send, - "(reflect.Value).Set": ext۰reflect۰Value۰Set, - "(reflect.Value).SetBool": ext۰NoEffect, - "(reflect.Value).SetBytes": ext۰reflect۰Value۰SetBytes, - "(reflect.Value).SetComplex": ext۰NoEffect, - "(reflect.Value).SetFloat": ext۰NoEffect, - "(reflect.Value).SetInt": ext۰NoEffect, - "(reflect.Value).SetLen": ext۰NoEffect, - "(reflect.Value).SetMapIndex": ext۰reflect۰Value۰SetMapIndex, - "(reflect.Value).SetPointer": ext۰reflect۰Value۰SetPointer, - "(reflect.Value).SetString": ext۰NoEffect, - "(reflect.Value).SetUint": ext۰NoEffect, - "(reflect.Value).Slice": ext۰reflect۰Value۰Slice, - "(reflect.Value).String": ext۰NoEffect, - "(reflect.Value).TryRecv": ext۰reflect۰Value۰Recv, - "(reflect.Value).TrySend": ext۰reflect۰Value۰Send, - "(reflect.Value).Type": ext۰NoEffect, - "(reflect.Value).Uint": ext۰NoEffect, - "(reflect.Value).UnsafeAddr": ext۰NoEffect, - - // Standalone reflect.* functions. 
- "reflect.Append": ext۰reflect۰Append, - "reflect.AppendSlice": ext۰reflect۰AppendSlice, - "reflect.Copy": ext۰reflect۰Copy, - "reflect.ChanOf": ext۰reflect۰ChanOf, - "reflect.DeepEqual": ext۰NoEffect, - "reflect.Indirect": ext۰reflect۰Indirect, - "reflect.MakeChan": ext۰reflect۰MakeChan, - "reflect.MakeFunc": ext۰reflect۰MakeFunc, - "reflect.MakeMap": ext۰reflect۰MakeMap, - "reflect.MakeSlice": ext۰reflect۰MakeSlice, - "reflect.MapOf": ext۰reflect۰MapOf, - "reflect.New": ext۰reflect۰New, - "reflect.NewAt": ext۰reflect۰NewAt, - "reflect.PtrTo": ext۰reflect۰PtrTo, - "reflect.Select": ext۰reflect۰Select, - "reflect.SliceOf": ext۰reflect۰SliceOf, - "reflect.TypeOf": ext۰reflect۰TypeOf, - "reflect.ValueOf": ext۰reflect۰ValueOf, - "reflect.Zero": ext۰reflect۰Zero, - "reflect.init": ext۰NoEffect, - - // *reflect.rtype methods - "(*reflect.rtype).Align": ext۰NoEffect, - "(*reflect.rtype).AssignableTo": ext۰NoEffect, - "(*reflect.rtype).Bits": ext۰NoEffect, - "(*reflect.rtype).ChanDir": ext۰NoEffect, - "(*reflect.rtype).ConvertibleTo": ext۰NoEffect, - "(*reflect.rtype).Elem": ext۰reflect۰rtype۰Elem, - "(*reflect.rtype).Field": ext۰reflect۰rtype۰Field, - "(*reflect.rtype).FieldAlign": ext۰NoEffect, - "(*reflect.rtype).FieldByIndex": ext۰reflect۰rtype۰FieldByIndex, - "(*reflect.rtype).FieldByName": ext۰reflect۰rtype۰FieldByName, - "(*reflect.rtype).FieldByNameFunc": ext۰reflect۰rtype۰FieldByNameFunc, - "(*reflect.rtype).Implements": ext۰NoEffect, - "(*reflect.rtype).In": ext۰reflect۰rtype۰In, - "(*reflect.rtype).IsVariadic": ext۰NoEffect, - "(*reflect.rtype).Key": ext۰reflect۰rtype۰Key, - "(*reflect.rtype).Kind": ext۰NoEffect, - "(*reflect.rtype).Len": ext۰NoEffect, - "(*reflect.rtype).Method": ext۰reflect۰rtype۰Method, - "(*reflect.rtype).MethodByName": ext۰reflect۰rtype۰MethodByName, - "(*reflect.rtype).Name": ext۰NoEffect, - "(*reflect.rtype).NumField": ext۰NoEffect, - "(*reflect.rtype).NumIn": ext۰NoEffect, - "(*reflect.rtype).NumMethod": ext۰NoEffect, - "(*reflect.rtype).NumOut": ext۰NoEffect, - "(*reflect.rtype).Out": ext۰reflect۰rtype۰Out, - "(*reflect.rtype).PkgPath": ext۰NoEffect, - "(*reflect.rtype).Size": ext۰NoEffect, - "(*reflect.rtype).String": ext۰NoEffect, - } { - intrinsicsByName[name] = fn - } -} - -// -------------------- (reflect.Value) -------------------- - -func ext۰reflect۰Value۰Addr(a *analysis, cgn *cgnode) {} // TODO(adonovan) - -// ---------- func (Value).Bytes() Value ---------- - -// result = v.Bytes() -type rVBytesConstraint struct { - v nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rVBytesConstraint) ptr() nodeid { return c.v } -func (c *rVBytesConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "rVBytes.result") -} -func (c *rVBytesConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.result = mapping[c.result] -} - -func (c *rVBytesConstraint) String() string { - return fmt.Sprintf("n%d = reflect n%d.Bytes()", c.result, c.v) -} - -func (c *rVBytesConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, slice, indirect := a.taggedValue(vObj) - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. 
- panic("indirect tagged object") - } - - tSlice, ok := tDyn.Underlying().(*types.Slice) - if ok && types.Identical(tSlice.Elem(), types.Typ[types.Uint8]) { - if a.onlineCopy(c.result, slice) { - changed = true - } - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰Value۰Bytes(a *analysis, cgn *cgnode) { - a.addConstraint(&rVBytesConstraint{ - v: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// ---------- func (Value).Call(in []Value) []Value ---------- - -// result = v.Call(in) -type rVCallConstraint struct { - cgn *cgnode - targets nodeid // (indirect) - v nodeid // (ptr) - arg nodeid // = in[*] - result nodeid // (indirect) - dotdotdot bool // interpret last arg as a "..." slice -} - -func (c *rVCallConstraint) ptr() nodeid { return c.v } -func (c *rVCallConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.targets), "rVCall.targets") - h.markIndirect(onodeid(c.result), "rVCall.result") -} -func (c *rVCallConstraint) renumber(mapping []nodeid) { - c.targets = mapping[c.targets] - c.v = mapping[c.v] - c.arg = mapping[c.arg] - c.result = mapping[c.result] -} - -func (c *rVCallConstraint) String() string { - return fmt.Sprintf("n%d = reflect n%d.Call(n%d)", c.result, c.v, c.arg) -} - -func (c *rVCallConstraint) solve(a *analysis, delta *nodeset) { - if c.targets == 0 { - panic("no targets") - } - - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, fn, indirect := a.taggedValue(vObj) - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. - panic("indirect tagged object") - } - - tSig, ok := tDyn.Underlying().(*types.Signature) - if !ok { - continue // not a function - } - if tSig.Recv() != nil { - panic(tSig) // TODO(adonovan): rethink when we implement Method() - } - - // Add dynamic call target. - if a.onlineCopy(c.targets, fn) { - a.addWork(c.targets) - // TODO(adonovan): is 'else continue' a sound optimisation here? - } - - // Allocate a P/R block. - tParams := tSig.Params() - tResults := tSig.Results() - params := a.addNodes(tParams, "rVCall.params") - results := a.addNodes(tResults, "rVCall.results") - - // Make a dynamic call to 'fn'. - a.store(fn, params, 1, a.sizeof(tParams)) - a.load(results, fn, 1+a.sizeof(tParams), a.sizeof(tResults)) - - // Populate P by type-asserting each actual arg (all merged in c.arg). - for i, n := 0, tParams.Len(); i < n; i++ { - T := tParams.At(i).Type() - a.typeAssert(T, params, c.arg, false) - params += nodeid(a.sizeof(T)) - } - - // Use R by tagging and copying each actual result to c.result. - for i, n := 0, tResults.Len(); i < n; i++ { - T := tResults.At(i).Type() - // Convert from an arbitrary type to a reflect.Value - // (like MakeInterface followed by reflect.ValueOf). - if isInterface(T) { - // (don't tag) - if a.onlineCopy(c.result, results) { - changed = true - } - } else { - obj := a.makeTagged(T, c.cgn, nil) - a.onlineCopyN(obj+1, results, a.sizeof(T)) - if a.addLabel(c.result, obj) { // (true) - changed = true - } - } - results += nodeid(a.sizeof(T)) - } - } - if changed { - a.addWork(c.result) - } -} - -// Common code for direct (inlined) and indirect calls to (reflect.Value).Call. -func reflectCallImpl(a *analysis, cgn *cgnode, site *callsite, recv, arg nodeid, dotdotdot bool) nodeid { - // Allocate []reflect.Value array for the result. 
- ret := a.nextNode() - a.addNodes(types.NewArray(a.reflectValueObj.Type(), 1), "rVCall.ret") - a.endObject(ret, cgn, nil) - - // pts(targets) will be the set of possible call targets. - site.targets = a.addOneNode(tInvalid, "rvCall.targets", nil) - - // All arguments are merged since they arrive in a slice. - argelts := a.addOneNode(a.reflectValueObj.Type(), "rVCall.args", nil) - a.load(argelts, arg, 1, 1) // slice elements - - a.addConstraint(&rVCallConstraint{ - cgn: cgn, - targets: site.targets, - v: recv, - arg: argelts, - result: ret + 1, // results go into elements of ret - dotdotdot: dotdotdot, - }) - return ret -} - -func reflectCall(a *analysis, cgn *cgnode, dotdotdot bool) { - // This is the shared contour implementation of (reflect.Value).Call - // and CallSlice, as used by indirect calls (rare). - // Direct calls are inlined in gen.go, eliding the - // intermediate cgnode for Call. - site := new(callsite) - cgn.sites = append(cgn.sites, site) - recv := a.funcParams(cgn.obj) - arg := recv + 1 - ret := reflectCallImpl(a, cgn, site, recv, arg, dotdotdot) - a.addressOf(cgn.fn.Signature.Results().At(0).Type(), a.funcResults(cgn.obj), ret) -} - -func ext۰reflect۰Value۰Call(a *analysis, cgn *cgnode) { - reflectCall(a, cgn, false) -} - -func ext۰reflect۰Value۰CallSlice(a *analysis, cgn *cgnode) { - // TODO(adonovan): implement. Also, inline direct calls in gen.go too. - if false { - reflectCall(a, cgn, true) - } -} - -func ext۰reflect۰Value۰Convert(a *analysis, cgn *cgnode) {} // TODO(adonovan) - -// ---------- func (Value).Elem() Value ---------- - -// result = v.Elem() -type rVElemConstraint struct { - cgn *cgnode - v nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rVElemConstraint) ptr() nodeid { return c.v } -func (c *rVElemConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "rVElem.result") -} -func (c *rVElemConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.result = mapping[c.result] -} - -func (c *rVElemConstraint) String() string { - return fmt.Sprintf("n%d = reflect n%d.Elem()", c.result, c.v) -} - -func (c *rVElemConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, payload, indirect := a.taggedValue(vObj) - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. 
- panic("indirect tagged object") - } - - switch t := tDyn.Underlying().(type) { - case *types.Interface: - if a.onlineCopy(c.result, payload) { - changed = true - } - - case *types.Pointer: - obj := a.makeTagged(t.Elem(), c.cgn, nil) - a.load(obj+1, payload, 0, a.sizeof(t.Elem())) - if a.addLabel(c.result, obj) { - changed = true - } - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰Value۰Elem(a *analysis, cgn *cgnode) { - a.addConstraint(&rVElemConstraint{ - cgn: cgn, - v: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -func ext۰reflect۰Value۰Field(a *analysis, cgn *cgnode) {} // TODO(adonovan) -func ext۰reflect۰Value۰FieldByIndex(a *analysis, cgn *cgnode) {} // TODO(adonovan) -func ext۰reflect۰Value۰FieldByName(a *analysis, cgn *cgnode) {} // TODO(adonovan) -func ext۰reflect۰Value۰FieldByNameFunc(a *analysis, cgn *cgnode) {} // TODO(adonovan) - -// ---------- func (Value).Index() Value ---------- - -// result = v.Index() -type rVIndexConstraint struct { - cgn *cgnode - v nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rVIndexConstraint) ptr() nodeid { return c.v } -func (c *rVIndexConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "rVIndex.result") -} -func (c *rVIndexConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.result = mapping[c.result] -} - -func (c *rVIndexConstraint) String() string { - return fmt.Sprintf("n%d = reflect n%d.Index()", c.result, c.v) -} - -func (c *rVIndexConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, payload, indirect := a.taggedValue(vObj) - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. - panic("indirect tagged object") - } - - var res nodeid - switch t := tDyn.Underlying().(type) { - case *types.Array: - res = a.makeTagged(t.Elem(), c.cgn, nil) - a.onlineCopyN(res+1, payload+1, a.sizeof(t.Elem())) - - case *types.Slice: - res = a.makeTagged(t.Elem(), c.cgn, nil) - a.load(res+1, payload, 1, a.sizeof(t.Elem())) - - case *types.Basic: - if t.Kind() == types.String { - res = a.makeTagged(types.Typ[types.Rune], c.cgn, nil) - } - } - if res != 0 && a.addLabel(c.result, res) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰Value۰Index(a *analysis, cgn *cgnode) { - a.addConstraint(&rVIndexConstraint{ - cgn: cgn, - v: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// ---------- func (Value).Interface() Value ---------- - -// result = v.Interface() -type rVInterfaceConstraint struct { - v nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rVInterfaceConstraint) ptr() nodeid { return c.v } -func (c *rVInterfaceConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "rVInterface.result") -} -func (c *rVInterfaceConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.result = mapping[c.result] -} - -func (c *rVInterfaceConstraint) String() string { - return fmt.Sprintf("n%d = reflect n%d.Interface()", c.result, c.v) -} - -func (c *rVInterfaceConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, payload, indirect := a.taggedValue(vObj) - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. 
- panic("indirect tagged object") - } - - if isInterface(tDyn) { - if a.onlineCopy(c.result, payload) { - a.addWork(c.result) - } - } else { - if a.addLabel(c.result, vObj) { - changed = true - } - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰Value۰Interface(a *analysis, cgn *cgnode) { - a.addConstraint(&rVInterfaceConstraint{ - v: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// ---------- func (Value).MapIndex(Value) Value ---------- - -// result = v.MapIndex(_) -type rVMapIndexConstraint struct { - cgn *cgnode - v nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rVMapIndexConstraint) ptr() nodeid { return c.v } -func (c *rVMapIndexConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "rVMapIndex.result") -} -func (c *rVMapIndexConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.result = mapping[c.result] -} - -func (c *rVMapIndexConstraint) String() string { - return fmt.Sprintf("n%d = reflect n%d.MapIndex(_)", c.result, c.v) -} - -func (c *rVMapIndexConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, m, indirect := a.taggedValue(vObj) - tMap, _ := tDyn.Underlying().(*types.Map) - if tMap == nil { - continue // not a map - } - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. - panic("indirect tagged object") - } - - obj := a.makeTagged(tMap.Elem(), c.cgn, nil) - a.load(obj+1, m, a.sizeof(tMap.Key()), a.sizeof(tMap.Elem())) - if a.addLabel(c.result, obj) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰Value۰MapIndex(a *analysis, cgn *cgnode) { - a.addConstraint(&rVMapIndexConstraint{ - cgn: cgn, - v: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// ---------- func (Value).MapKeys() []Value ---------- - -// result = v.MapKeys() -type rVMapKeysConstraint struct { - cgn *cgnode - v nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rVMapKeysConstraint) ptr() nodeid { return c.v } -func (c *rVMapKeysConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "rVMapKeys.result") -} -func (c *rVMapKeysConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.result = mapping[c.result] -} - -func (c *rVMapKeysConstraint) String() string { - return fmt.Sprintf("n%d = reflect n%d.MapKeys()", c.result, c.v) -} - -func (c *rVMapKeysConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, m, indirect := a.taggedValue(vObj) - tMap, _ := tDyn.Underlying().(*types.Map) - if tMap == nil { - continue // not a map - } - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. - panic("indirect tagged object") - } - - kObj := a.makeTagged(tMap.Key(), c.cgn, nil) - a.load(kObj+1, m, 0, a.sizeof(tMap.Key())) - if a.addLabel(c.result, kObj) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰Value۰MapKeys(a *analysis, cgn *cgnode) { - // Allocate an array for the result. 
- obj := a.nextNode() - T := types.NewSlice(a.reflectValueObj.Type()) - a.addNodes(sliceToArray(T), "reflect.MapKeys result") - a.endObject(obj, cgn, nil) - a.addressOf(T, a.funcResults(cgn.obj), obj) - - a.addConstraint(&rVMapKeysConstraint{ - cgn: cgn, - v: a.funcParams(cgn.obj), - result: obj + 1, // result is stored in array elems - }) -} - -func ext۰reflect۰Value۰Method(a *analysis, cgn *cgnode) {} // TODO(adonovan) -func ext۰reflect۰Value۰MethodByName(a *analysis, cgn *cgnode) {} // TODO(adonovan) - -// ---------- func (Value).Recv(Value) Value ---------- - -// result, _ = v.Recv() -type rVRecvConstraint struct { - cgn *cgnode - v nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rVRecvConstraint) ptr() nodeid { return c.v } -func (c *rVRecvConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "rVRecv.result") -} -func (c *rVRecvConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.result = mapping[c.result] -} - -func (c *rVRecvConstraint) String() string { - return fmt.Sprintf("n%d = reflect n%d.Recv()", c.result, c.v) -} - -func (c *rVRecvConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, ch, indirect := a.taggedValue(vObj) - tChan, _ := tDyn.Underlying().(*types.Chan) - if tChan == nil { - continue // not a channel - } - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. - panic("indirect tagged object") - } - - tElem := tChan.Elem() - elemObj := a.makeTagged(tElem, c.cgn, nil) - a.load(elemObj+1, ch, 0, a.sizeof(tElem)) - if a.addLabel(c.result, elemObj) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰Value۰Recv(a *analysis, cgn *cgnode) { - a.addConstraint(&rVRecvConstraint{ - cgn: cgn, - v: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// ---------- func (Value).Send(Value) ---------- - -// v.Send(x) -type rVSendConstraint struct { - cgn *cgnode - v nodeid // (ptr) - x nodeid -} - -func (c *rVSendConstraint) ptr() nodeid { return c.v } -func (c *rVSendConstraint) presolve(*hvn) {} -func (c *rVSendConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.x = mapping[c.x] -} - -func (c *rVSendConstraint) String() string { - return fmt.Sprintf("reflect n%d.Send(n%d)", c.v, c.x) -} - -func (c *rVSendConstraint) solve(a *analysis, delta *nodeset) { - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, ch, indirect := a.taggedValue(vObj) - tChan, _ := tDyn.Underlying().(*types.Chan) - if tChan == nil { - continue // not a channel - } - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. - panic("indirect tagged object") - } - - // Extract x's payload to xtmp, then store to channel. 
- tElem := tChan.Elem() - xtmp := a.addNodes(tElem, "Send.xtmp") - a.typeAssert(tElem, xtmp, c.x, false) - a.store(ch, xtmp, 0, a.sizeof(tElem)) - } -} - -func ext۰reflect۰Value۰Send(a *analysis, cgn *cgnode) { - params := a.funcParams(cgn.obj) - a.addConstraint(&rVSendConstraint{ - cgn: cgn, - v: params, - x: params + 1, - }) -} - -func ext۰reflect۰Value۰Set(a *analysis, cgn *cgnode) {} // TODO(adonovan) - -// ---------- func (Value).SetBytes(x []byte) ---------- - -// v.SetBytes(x) -type rVSetBytesConstraint struct { - cgn *cgnode - v nodeid // (ptr) - x nodeid -} - -func (c *rVSetBytesConstraint) ptr() nodeid { return c.v } -func (c *rVSetBytesConstraint) presolve(*hvn) {} -func (c *rVSetBytesConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.x = mapping[c.x] -} - -func (c *rVSetBytesConstraint) String() string { - return fmt.Sprintf("reflect n%d.SetBytes(n%d)", c.v, c.x) -} - -func (c *rVSetBytesConstraint) solve(a *analysis, delta *nodeset) { - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, slice, indirect := a.taggedValue(vObj) - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. - panic("indirect tagged object") - } - - tSlice, ok := tDyn.Underlying().(*types.Slice) - if ok && types.Identical(tSlice.Elem(), types.Typ[types.Uint8]) { - if a.onlineCopy(slice, c.x) { - a.addWork(slice) - } - } - } -} - -func ext۰reflect۰Value۰SetBytes(a *analysis, cgn *cgnode) { - params := a.funcParams(cgn.obj) - a.addConstraint(&rVSetBytesConstraint{ - cgn: cgn, - v: params, - x: params + 1, - }) -} - -// ---------- func (Value).SetMapIndex(k Value, v Value) ---------- - -// v.SetMapIndex(key, val) -type rVSetMapIndexConstraint struct { - cgn *cgnode - v nodeid // (ptr) - key nodeid - val nodeid -} - -func (c *rVSetMapIndexConstraint) ptr() nodeid { return c.v } -func (c *rVSetMapIndexConstraint) presolve(*hvn) {} -func (c *rVSetMapIndexConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.key = mapping[c.key] - c.val = mapping[c.val] -} - -func (c *rVSetMapIndexConstraint) String() string { - return fmt.Sprintf("reflect n%d.SetMapIndex(n%d, n%d)", c.v, c.key, c.val) -} - -func (c *rVSetMapIndexConstraint) solve(a *analysis, delta *nodeset) { - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, m, indirect := a.taggedValue(vObj) - tMap, _ := tDyn.Underlying().(*types.Map) - if tMap == nil { - continue // not a map - } - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. - panic("indirect tagged object") - } - - keysize := a.sizeof(tMap.Key()) - - // Extract key's payload to keytmp, then store to map key. - keytmp := a.addNodes(tMap.Key(), "SetMapIndex.keytmp") - a.typeAssert(tMap.Key(), keytmp, c.key, false) - a.store(m, keytmp, 0, keysize) - - // Extract val's payload to vtmp, then store to map value. 
- valtmp := a.addNodes(tMap.Elem(), "SetMapIndex.valtmp") - a.typeAssert(tMap.Elem(), valtmp, c.val, false) - a.store(m, valtmp, keysize, a.sizeof(tMap.Elem())) - } -} - -func ext۰reflect۰Value۰SetMapIndex(a *analysis, cgn *cgnode) { - params := a.funcParams(cgn.obj) - a.addConstraint(&rVSetMapIndexConstraint{ - cgn: cgn, - v: params, - key: params + 1, - val: params + 2, - }) -} - -func ext۰reflect۰Value۰SetPointer(a *analysis, cgn *cgnode) {} // TODO(adonovan) - -// ---------- func (Value).Slice(v Value, i, j int) Value ---------- - -// result = v.Slice(_, _) -type rVSliceConstraint struct { - cgn *cgnode - v nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rVSliceConstraint) ptr() nodeid { return c.v } -func (c *rVSliceConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "rVSlice.result") -} -func (c *rVSliceConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.result = mapping[c.result] -} - -func (c *rVSliceConstraint) String() string { - return fmt.Sprintf("n%d = reflect n%d.Slice(_, _)", c.result, c.v) -} - -func (c *rVSliceConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, payload, indirect := a.taggedValue(vObj) - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. - panic("indirect tagged object") - } - - var res nodeid - switch t := tDyn.Underlying().(type) { - case *types.Pointer: - if tArr, ok := t.Elem().Underlying().(*types.Array); ok { - // pointer to array - res = a.makeTagged(types.NewSlice(tArr.Elem()), c.cgn, nil) - if a.onlineCopy(res+1, payload) { - a.addWork(res + 1) - } - } - - case *types.Array: - // TODO(adonovan): implement addressable - // arrays when we do indirect tagged objects. 
- - case *types.Slice: - res = vObj - - case *types.Basic: - if t == types.Typ[types.String] { - res = vObj - } - } - - if res != 0 && a.addLabel(c.result, res) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰Value۰Slice(a *analysis, cgn *cgnode) { - a.addConstraint(&rVSliceConstraint{ - cgn: cgn, - v: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// -------------------- Standalone reflect functions -------------------- - -func ext۰reflect۰Append(a *analysis, cgn *cgnode) {} // TODO(adonovan) -func ext۰reflect۰AppendSlice(a *analysis, cgn *cgnode) {} // TODO(adonovan) -func ext۰reflect۰Copy(a *analysis, cgn *cgnode) {} // TODO(adonovan) - -// ---------- func ChanOf(ChanDir, Type) Type ---------- - -// result = ChanOf(dir, t) -type reflectChanOfConstraint struct { - cgn *cgnode - t nodeid // (ptr) - result nodeid // (indirect) - dirs []types.ChanDir -} - -func (c *reflectChanOfConstraint) ptr() nodeid { return c.t } -func (c *reflectChanOfConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "reflectChanOf.result") -} -func (c *reflectChanOfConstraint) renumber(mapping []nodeid) { - c.t = mapping[c.t] - c.result = mapping[c.result] -} - -func (c *reflectChanOfConstraint) String() string { - return fmt.Sprintf("n%d = reflect.ChanOf(n%d)", c.result, c.t) -} - -func (c *reflectChanOfConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - tObj := nodeid(x) - T := a.rtypeTaggedValue(tObj) - - if typeTooHigh(T) { - continue - } - - for _, dir := range c.dirs { - if a.addLabel(c.result, a.makeRtype(types.NewChan(dir, T))) { - changed = true - } - } - } - if changed { - a.addWork(c.result) - } -} - -// dirMap maps reflect.ChanDir to the set of channel types generated by ChanOf. -var dirMap = [...][]types.ChanDir{ - 0: {types.SendOnly, types.RecvOnly, types.SendRecv}, // unknown - reflect.RecvDir: {types.RecvOnly}, - reflect.SendDir: {types.SendOnly}, - reflect.BothDir: {types.SendRecv}, -} - -func ext۰reflect۰ChanOf(a *analysis, cgn *cgnode) { - // If we have access to the callsite, - // and the channel argument is a constant (as is usual), - // only generate the requested direction. 
- var dir reflect.ChanDir // unknown - if site := cgn.callersite; site != nil { - if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok { - v := c.Int64() - if 0 <= v && v <= int64(reflect.BothDir) { - dir = reflect.ChanDir(v) - } - } - } - - params := a.funcParams(cgn.obj) - a.addConstraint(&reflectChanOfConstraint{ - cgn: cgn, - t: params + 1, - result: a.funcResults(cgn.obj), - dirs: dirMap[dir], - }) -} - -// ---------- func Indirect(v Value) Value ---------- - -// result = Indirect(v) -type reflectIndirectConstraint struct { - cgn *cgnode - v nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *reflectIndirectConstraint) ptr() nodeid { return c.v } -func (c *reflectIndirectConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "reflectIndirect.result") -} -func (c *reflectIndirectConstraint) renumber(mapping []nodeid) { - c.v = mapping[c.v] - c.result = mapping[c.result] -} - -func (c *reflectIndirectConstraint) String() string { - return fmt.Sprintf("n%d = reflect.Indirect(n%d)", c.result, c.v) -} - -func (c *reflectIndirectConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - vObj := nodeid(x) - tDyn, _, _ := a.taggedValue(vObj) - var res nodeid - if tPtr, ok := tDyn.Underlying().(*types.Pointer); ok { - // load the payload of the pointer's tagged object - // into a new tagged object - res = a.makeTagged(tPtr.Elem(), c.cgn, nil) - a.load(res+1, vObj+1, 0, a.sizeof(tPtr.Elem())) - } else { - res = vObj - } - - if a.addLabel(c.result, res) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰Indirect(a *analysis, cgn *cgnode) { - a.addConstraint(&reflectIndirectConstraint{ - cgn: cgn, - v: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// ---------- func MakeChan(Type) Value ---------- - -// result = MakeChan(typ) -type reflectMakeChanConstraint struct { - cgn *cgnode - typ nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *reflectMakeChanConstraint) ptr() nodeid { return c.typ } -func (c *reflectMakeChanConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "reflectMakeChan.result") -} -func (c *reflectMakeChanConstraint) renumber(mapping []nodeid) { - c.typ = mapping[c.typ] - c.result = mapping[c.result] -} - -func (c *reflectMakeChanConstraint) String() string { - return fmt.Sprintf("n%d = reflect.MakeChan(n%d)", c.result, c.typ) -} - -func (c *reflectMakeChanConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - typObj := nodeid(x) - T := a.rtypeTaggedValue(typObj) - tChan, ok := T.Underlying().(*types.Chan) - if !ok || tChan.Dir() != types.SendRecv { - continue // not a bidirectional channel type - } - - obj := a.nextNode() - a.addNodes(tChan.Elem(), "reflect.MakeChan.value") - a.endObject(obj, c.cgn, nil) - - // put its address in a new T-tagged object - id := a.makeTagged(T, c.cgn, nil) - a.addLabel(id+1, obj) - - // flow the T-tagged object to the result - if a.addLabel(c.result, id) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰MakeChan(a *analysis, cgn *cgnode) { - a.addConstraint(&reflectMakeChanConstraint{ - cgn: cgn, - typ: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -func ext۰reflect۰MakeFunc(a *analysis, cgn *cgnode) {} // TODO(adonovan) - -// ---------- func MakeMap(Type) Value ---------- - -// result = MakeMap(typ) -type reflectMakeMapConstraint struct { - cgn *cgnode 
- typ nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *reflectMakeMapConstraint) ptr() nodeid { return c.typ } -func (c *reflectMakeMapConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "reflectMakeMap.result") -} -func (c *reflectMakeMapConstraint) renumber(mapping []nodeid) { - c.typ = mapping[c.typ] - c.result = mapping[c.result] -} - -func (c *reflectMakeMapConstraint) String() string { - return fmt.Sprintf("n%d = reflect.MakeMap(n%d)", c.result, c.typ) -} - -func (c *reflectMakeMapConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - typObj := nodeid(x) - T := a.rtypeTaggedValue(typObj) - tMap, ok := T.Underlying().(*types.Map) - if !ok { - continue // not a map type - } - - mapObj := a.nextNode() - a.addNodes(tMap.Key(), "reflect.MakeMap.key") - a.addNodes(tMap.Elem(), "reflect.MakeMap.value") - a.endObject(mapObj, c.cgn, nil) - - // put its address in a new T-tagged object - id := a.makeTagged(T, c.cgn, nil) - a.addLabel(id+1, mapObj) - - // flow the T-tagged object to the result - if a.addLabel(c.result, id) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰MakeMap(a *analysis, cgn *cgnode) { - a.addConstraint(&reflectMakeMapConstraint{ - cgn: cgn, - typ: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// ---------- func MakeSlice(Type) Value ---------- - -// result = MakeSlice(typ) -type reflectMakeSliceConstraint struct { - cgn *cgnode - typ nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *reflectMakeSliceConstraint) ptr() nodeid { return c.typ } -func (c *reflectMakeSliceConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "reflectMakeSlice.result") -} -func (c *reflectMakeSliceConstraint) renumber(mapping []nodeid) { - c.typ = mapping[c.typ] - c.result = mapping[c.result] -} - -func (c *reflectMakeSliceConstraint) String() string { - return fmt.Sprintf("n%d = reflect.MakeSlice(n%d)", c.result, c.typ) -} - -func (c *reflectMakeSliceConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - typObj := nodeid(x) - T := a.rtypeTaggedValue(typObj) - if _, ok := T.Underlying().(*types.Slice); !ok { - continue // not a slice type - } - - obj := a.nextNode() - a.addNodes(sliceToArray(T), "reflect.MakeSlice") - a.endObject(obj, c.cgn, nil) - - // put its address in a new T-tagged object - id := a.makeTagged(T, c.cgn, nil) - a.addLabel(id+1, obj) - - // flow the T-tagged object to the result - if a.addLabel(c.result, id) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰MakeSlice(a *analysis, cgn *cgnode) { - a.addConstraint(&reflectMakeSliceConstraint{ - cgn: cgn, - typ: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -func ext۰reflect۰MapOf(a *analysis, cgn *cgnode) {} // TODO(adonovan) - -// ---------- func New(Type) Value ---------- - -// result = New(typ) -type reflectNewConstraint struct { - cgn *cgnode - typ nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *reflectNewConstraint) ptr() nodeid { return c.typ } -func (c *reflectNewConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "reflectNew.result") -} -func (c *reflectNewConstraint) renumber(mapping []nodeid) { - c.typ = mapping[c.typ] - c.result = mapping[c.result] -} - -func (c *reflectNewConstraint) String() string { - return fmt.Sprintf("n%d = reflect.New(n%d)", c.result, c.typ) -} - -func (c 
*reflectNewConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - typObj := nodeid(x) - T := a.rtypeTaggedValue(typObj) - - // allocate new T object - newObj := a.nextNode() - a.addNodes(T, "reflect.New") - a.endObject(newObj, c.cgn, nil) - - // put its address in a new *T-tagged object - id := a.makeTagged(types.NewPointer(T), c.cgn, nil) - a.addLabel(id+1, newObj) - - // flow the pointer to the result - if a.addLabel(c.result, id) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰New(a *analysis, cgn *cgnode) { - a.addConstraint(&reflectNewConstraint{ - cgn: cgn, - typ: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -func ext۰reflect۰NewAt(a *analysis, cgn *cgnode) { - ext۰reflect۰New(a, cgn) - - // TODO(adonovan): also report dynamic calls to unsound intrinsics. - if site := cgn.callersite; site != nil { - a.warnf(site.pos(), "unsound: %s contains a reflect.NewAt() call", site.instr.Parent()) - } -} - -// ---------- func PtrTo(Type) Type ---------- - -// result = PtrTo(t) -type reflectPtrToConstraint struct { - cgn *cgnode - t nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *reflectPtrToConstraint) ptr() nodeid { return c.t } -func (c *reflectPtrToConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "reflectPtrTo.result") -} -func (c *reflectPtrToConstraint) renumber(mapping []nodeid) { - c.t = mapping[c.t] - c.result = mapping[c.result] -} - -func (c *reflectPtrToConstraint) String() string { - return fmt.Sprintf("n%d = reflect.PtrTo(n%d)", c.result, c.t) -} - -func (c *reflectPtrToConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - tObj := nodeid(x) - T := a.rtypeTaggedValue(tObj) - - if typeTooHigh(T) { - continue - } - - if a.addLabel(c.result, a.makeRtype(types.NewPointer(T))) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰PtrTo(a *analysis, cgn *cgnode) { - a.addConstraint(&reflectPtrToConstraint{ - cgn: cgn, - t: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -func ext۰reflect۰Select(a *analysis, cgn *cgnode) {} // TODO(adonovan) - -// ---------- func SliceOf(Type) Type ---------- - -// result = SliceOf(t) -type reflectSliceOfConstraint struct { - cgn *cgnode - t nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *reflectSliceOfConstraint) ptr() nodeid { return c.t } -func (c *reflectSliceOfConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "reflectSliceOf.result") -} -func (c *reflectSliceOfConstraint) renumber(mapping []nodeid) { - c.t = mapping[c.t] - c.result = mapping[c.result] -} - -func (c *reflectSliceOfConstraint) String() string { - return fmt.Sprintf("n%d = reflect.SliceOf(n%d)", c.result, c.t) -} - -func (c *reflectSliceOfConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - tObj := nodeid(x) - T := a.rtypeTaggedValue(tObj) - - if typeTooHigh(T) { - continue - } - - if a.addLabel(c.result, a.makeRtype(types.NewSlice(T))) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰SliceOf(a *analysis, cgn *cgnode) { - a.addConstraint(&reflectSliceOfConstraint{ - cgn: cgn, - t: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// ---------- func TypeOf(v Value) Type ---------- - -// result = TypeOf(i) -type reflectTypeOfConstraint struct { - cgn 
*cgnode - i nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *reflectTypeOfConstraint) ptr() nodeid { return c.i } -func (c *reflectTypeOfConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "reflectTypeOf.result") -} -func (c *reflectTypeOfConstraint) renumber(mapping []nodeid) { - c.i = mapping[c.i] - c.result = mapping[c.result] -} - -func (c *reflectTypeOfConstraint) String() string { - return fmt.Sprintf("n%d = reflect.TypeOf(n%d)", c.result, c.i) -} - -func (c *reflectTypeOfConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - iObj := nodeid(x) - tDyn, _, _ := a.taggedValue(iObj) - if a.addLabel(c.result, a.makeRtype(tDyn)) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰TypeOf(a *analysis, cgn *cgnode) { - a.addConstraint(&reflectTypeOfConstraint{ - cgn: cgn, - i: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// ---------- func ValueOf(interface{}) Value ---------- - -func ext۰reflect۰ValueOf(a *analysis, cgn *cgnode) { - // TODO(adonovan): when we start creating indirect tagged - // objects, we'll need to handle them specially here since - // they must never appear in the PTS of an interface{}. - a.copy(a.funcResults(cgn.obj), a.funcParams(cgn.obj), 1) -} - -// ---------- func Zero(Type) Value ---------- - -// result = Zero(typ) -type reflectZeroConstraint struct { - cgn *cgnode - typ nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *reflectZeroConstraint) ptr() nodeid { return c.typ } -func (c *reflectZeroConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "reflectZero.result") -} -func (c *reflectZeroConstraint) renumber(mapping []nodeid) { - c.typ = mapping[c.typ] - c.result = mapping[c.result] -} - -func (c *reflectZeroConstraint) String() string { - return fmt.Sprintf("n%d = reflect.Zero(n%d)", c.result, c.typ) -} - -func (c *reflectZeroConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - typObj := nodeid(x) - T := a.rtypeTaggedValue(typObj) - - // TODO(adonovan): if T is an interface type, we need - // to create an indirect tagged object containing - // new(T). To avoid updates of such shared values, - // we'll need another flag on indirect tagged objects - // that marks whether they are addressable or - // readonly, just like the reflect package does. 
- - // memoize using a.reflectZeros[T] - var id nodeid - if z := a.reflectZeros.At(T); false && z != nil { - id = z.(nodeid) - } else { - id = a.makeTagged(T, c.cgn, nil) - a.reflectZeros.Set(T, id) - } - if a.addLabel(c.result, id) { - changed = true - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰Zero(a *analysis, cgn *cgnode) { - a.addConstraint(&reflectZeroConstraint{ - cgn: cgn, - typ: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// -------------------- (*reflect.rtype) methods -------------------- - -// ---------- func (*rtype) Elem() Type ---------- - -// result = Elem(t) -type rtypeElemConstraint struct { - cgn *cgnode - t nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rtypeElemConstraint) ptr() nodeid { return c.t } -func (c *rtypeElemConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "rtypeElem.result") -} -func (c *rtypeElemConstraint) renumber(mapping []nodeid) { - c.t = mapping[c.t] - c.result = mapping[c.result] -} - -func (c *rtypeElemConstraint) String() string { - return fmt.Sprintf("n%d = (*reflect.rtype).Elem(n%d)", c.result, c.t) -} - -func (c *rtypeElemConstraint) solve(a *analysis, delta *nodeset) { - // Implemented by *types.{Map,Chan,Array,Slice,Pointer}. - type hasElem interface { - Elem() types.Type - } - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - tObj := nodeid(x) - T := a.nodes[tObj].obj.data.(types.Type) - if tHasElem, ok := T.Underlying().(hasElem); ok { - if a.addLabel(c.result, a.makeRtype(tHasElem.Elem())) { - changed = true - } - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰rtype۰Elem(a *analysis, cgn *cgnode) { - a.addConstraint(&rtypeElemConstraint{ - cgn: cgn, - t: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// ---------- func (*rtype) Field(int) StructField ---------- -// ---------- func (*rtype) FieldByName(string) (StructField, bool) ---------- - -// result = FieldByName(t, name) -// result = Field(t, _) -type rtypeFieldByNameConstraint struct { - cgn *cgnode - name string // name of field; "" for unknown - t nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rtypeFieldByNameConstraint) ptr() nodeid { return c.t } -func (c *rtypeFieldByNameConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result+3), "rtypeFieldByName.result.Type") -} -func (c *rtypeFieldByNameConstraint) renumber(mapping []nodeid) { - c.t = mapping[c.t] - c.result = mapping[c.result] -} - -func (c *rtypeFieldByNameConstraint) String() string { - return fmt.Sprintf("n%d = (*reflect.rtype).FieldByName(n%d, %q)", c.result, c.t, c.name) -} - -func (c *rtypeFieldByNameConstraint) solve(a *analysis, delta *nodeset) { - // type StructField struct { - // 0 __identity__ - // 1 Name string - // 2 PkgPath string - // 3 Type Type - // 4 Tag StructTag - // 5 Offset uintptr - // 6 Index []int - // 7 Anonymous bool - // } - - for _, x := range delta.AppendTo(a.deltaSpace) { - tObj := nodeid(x) - T := a.nodes[tObj].obj.data.(types.Type) - tStruct, ok := T.Underlying().(*types.Struct) - if !ok { - continue // not a struct type - } - - n := tStruct.NumFields() - for i := 0; i < n; i++ { - f := tStruct.Field(i) - if c.name == "" || c.name == f.Name() { - - // a.offsetOf(Type) is 3. - if id := c.result + 3; a.addLabel(id, a.makeRtype(f.Type())) { - a.addWork(id) - } - // TODO(adonovan): StructField.Index should be non-nil. 
- } - } - } -} - -func ext۰reflect۰rtype۰FieldByName(a *analysis, cgn *cgnode) { - // If we have access to the callsite, - // and the argument is a string constant, - // return only that field. - var name string - if site := cgn.callersite; site != nil { - if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok { - name = constant.StringVal(c.Value) - } - } - - a.addConstraint(&rtypeFieldByNameConstraint{ - cgn: cgn, - name: name, - t: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -func ext۰reflect۰rtype۰Field(a *analysis, cgn *cgnode) { - // No-one ever calls Field with a constant argument, - // so we don't specialize that case. - a.addConstraint(&rtypeFieldByNameConstraint{ - cgn: cgn, - t: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -func ext۰reflect۰rtype۰FieldByIndex(a *analysis, cgn *cgnode) {} // TODO(adonovan) -func ext۰reflect۰rtype۰FieldByNameFunc(a *analysis, cgn *cgnode) {} // TODO(adonovan) - -// ---------- func (*rtype) In/Out(i int) Type ---------- - -// result = In/Out(t, i) -type rtypeInOutConstraint struct { - cgn *cgnode - t nodeid // (ptr) - result nodeid // (indirect) - out bool - i int // -ve if not a constant -} - -func (c *rtypeInOutConstraint) ptr() nodeid { return c.t } -func (c *rtypeInOutConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "rtypeInOut.result") -} -func (c *rtypeInOutConstraint) renumber(mapping []nodeid) { - c.t = mapping[c.t] - c.result = mapping[c.result] -} - -func (c *rtypeInOutConstraint) String() string { - return fmt.Sprintf("n%d = (*reflect.rtype).InOut(n%d, %d)", c.result, c.t, c.i) -} - -func (c *rtypeInOutConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - tObj := nodeid(x) - T := a.nodes[tObj].obj.data.(types.Type) - sig, ok := T.Underlying().(*types.Signature) - if !ok { - continue // not a func type - } - - tuple := sig.Params() - if c.out { - tuple = sig.Results() - } - for i, n := 0, tuple.Len(); i < n; i++ { - if c.i < 0 || c.i == i { - if a.addLabel(c.result, a.makeRtype(tuple.At(i).Type())) { - changed = true - } - } - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰rtype۰InOut(a *analysis, cgn *cgnode, out bool) { - // If we have access to the callsite, - // and the argument is an int constant, - // return only that parameter. 
- index := -1 - if site := cgn.callersite; site != nil { - if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok { - index = int(c.Int64()) - } - } - a.addConstraint(&rtypeInOutConstraint{ - cgn: cgn, - t: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - out: out, - i: index, - }) -} - -func ext۰reflect۰rtype۰In(a *analysis, cgn *cgnode) { - ext۰reflect۰rtype۰InOut(a, cgn, false) -} - -func ext۰reflect۰rtype۰Out(a *analysis, cgn *cgnode) { - ext۰reflect۰rtype۰InOut(a, cgn, true) -} - -// ---------- func (*rtype) Key() Type ---------- - -// result = Key(t) -type rtypeKeyConstraint struct { - cgn *cgnode - t nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rtypeKeyConstraint) ptr() nodeid { return c.t } -func (c *rtypeKeyConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result), "rtypeKey.result") -} -func (c *rtypeKeyConstraint) renumber(mapping []nodeid) { - c.t = mapping[c.t] - c.result = mapping[c.result] -} - -func (c *rtypeKeyConstraint) String() string { - return fmt.Sprintf("n%d = (*reflect.rtype).Key(n%d)", c.result, c.t) -} - -func (c *rtypeKeyConstraint) solve(a *analysis, delta *nodeset) { - changed := false - for _, x := range delta.AppendTo(a.deltaSpace) { - tObj := nodeid(x) - T := a.nodes[tObj].obj.data.(types.Type) - if tMap, ok := T.Underlying().(*types.Map); ok { - if a.addLabel(c.result, a.makeRtype(tMap.Key())) { - changed = true - } - } - } - if changed { - a.addWork(c.result) - } -} - -func ext۰reflect۰rtype۰Key(a *analysis, cgn *cgnode) { - a.addConstraint(&rtypeKeyConstraint{ - cgn: cgn, - t: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// ---------- func (*rtype) Method(int) (Method, bool) ---------- -// ---------- func (*rtype) MethodByName(string) (Method, bool) ---------- - -// result = MethodByName(t, name) -// result = Method(t, _) -type rtypeMethodByNameConstraint struct { - cgn *cgnode - name string // name of method; "" for unknown - t nodeid // (ptr) - result nodeid // (indirect) -} - -func (c *rtypeMethodByNameConstraint) ptr() nodeid { return c.t } -func (c *rtypeMethodByNameConstraint) presolve(h *hvn) { - h.markIndirect(onodeid(c.result+3), "rtypeMethodByName.result.Type") - h.markIndirect(onodeid(c.result+4), "rtypeMethodByName.result.Func") -} -func (c *rtypeMethodByNameConstraint) renumber(mapping []nodeid) { - c.t = mapping[c.t] - c.result = mapping[c.result] -} - -func (c *rtypeMethodByNameConstraint) String() string { - return fmt.Sprintf("n%d = (*reflect.rtype).MethodByName(n%d, %q)", c.result, c.t, c.name) -} - -// changeRecv returns sig with Recv prepended to Params(). -func changeRecv(sig *types.Signature) *types.Signature { - params := sig.Params() - n := params.Len() - p2 := make([]*types.Var, n+1) - p2[0] = sig.Recv() - for i := 0; i < n; i++ { - p2[i+1] = params.At(i) - } - return types.NewSignature(nil, types.NewTuple(p2...), sig.Results(), sig.Variadic()) -} - -func (c *rtypeMethodByNameConstraint) solve(a *analysis, delta *nodeset) { - for _, x := range delta.AppendTo(a.deltaSpace) { - tObj := nodeid(x) - T := a.nodes[tObj].obj.data.(types.Type) - - isIface := isInterface(T) - - // We don't use Lookup(c.name) when c.name != "" to avoid - // ambiguity: >1 unexported methods could match. 
- mset := a.prog.MethodSets.MethodSet(T) - for i, n := 0, mset.Len(); i < n; i++ { - sel := mset.At(i) - if c.name == "" || c.name == sel.Obj().Name() { - // type Method struct { - // 0 __identity__ - // 1 Name string - // 2 PkgPath string - // 3 Type Type - // 4 Func Value - // 5 Index int - // } - - var sig *types.Signature - var fn *ssa.Function - if isIface { - sig = sel.Type().(*types.Signature) - } else { - fn = a.prog.MethodValue(sel) - // move receiver to params[0] - sig = changeRecv(fn.Signature) - } - - // a.offsetOf(Type) is 3. - if id := c.result + 3; a.addLabel(id, a.makeRtype(sig)) { - a.addWork(id) - } - if fn != nil { - // a.offsetOf(Func) is 4. - if id := c.result + 4; a.addLabel(id, a.objectNode(nil, fn)) { - a.addWork(id) - } - } - } - } - } -} - -func ext۰reflect۰rtype۰MethodByName(a *analysis, cgn *cgnode) { - // If we have access to the callsite, - // and the argument is a string constant, - // return only that method. - var name string - if site := cgn.callersite; site != nil { - if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok { - name = constant.StringVal(c.Value) - } - } - - a.addConstraint(&rtypeMethodByNameConstraint{ - cgn: cgn, - name: name, - t: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -func ext۰reflect۰rtype۰Method(a *analysis, cgn *cgnode) { - // No-one ever calls Method with a constant argument, - // so we don't specialize that case. - a.addConstraint(&rtypeMethodByNameConstraint{ - cgn: cgn, - t: a.funcParams(cgn.obj), - result: a.funcResults(cgn.obj), - }) -} - -// typeHeight returns the "height" of the type, which is roughly -// speaking the number of chan, map, pointer and slice type constructors -// at the root of T; these are the four type kinds that can be created -// via reflection. Chan and map constructors are counted as double the -// height of slice and pointer constructors since they are less often -// deeply nested. -// -// The solver rules for type constructors must somehow bound the set of -// types they create to ensure termination of the algorithm in cases -// where the output of a type constructor flows to its input, e.g. -// -// func f(t reflect.Type) { -// f(reflect.PtrTo(t)) -// } -// -// It does this by limiting the type height to k, but this still leaves -// a potentially exponential (4^k) number of of types that may be -// enumerated in pathological cases. -func typeHeight(T types.Type) int { - switch T := T.(type) { - case *types.Chan: - return 2 + typeHeight(T.Elem()) - case *types.Map: - k := typeHeight(T.Key()) - v := typeHeight(T.Elem()) - if v > k { - k = v // max(k, v) - } - return 2 + k - case *types.Slice: - return 1 + typeHeight(T.Elem()) - case *types.Pointer: - return 1 + typeHeight(T.Elem()) - } - return 0 -} - -func typeTooHigh(T types.Type) bool { - return typeHeight(T) > 3 -} diff --git a/go/pointer/solve.go b/go/pointer/solve.go deleted file mode 100644 index 7a41b78a864..00000000000 --- a/go/pointer/solve.go +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pointer - -// This file defines a naive Andersen-style solver for the inclusion -// constraint system. 
- -import ( - "fmt" - "go/types" -) - -type solverState struct { - complex []constraint // complex constraints attached to this node - copyTo nodeset // simple copy constraint edges - pts nodeset // points-to set of this node - prevPTS nodeset // pts(n) in previous iteration (for difference propagation) -} - -func (a *analysis) solve() { - start("Solving") - if a.log != nil { - fmt.Fprintf(a.log, "\n\n==== Solving constraints\n\n") - } - - // Solver main loop. - var delta nodeset - for { - // Add new constraints to the graph: - // static constraints from SSA on round 1, - // dynamic constraints from reflection thereafter. - a.processNewConstraints() - - var x int - if !a.work.TakeMin(&x) { - break // empty - } - id := nodeid(x) - if a.log != nil { - fmt.Fprintf(a.log, "\tnode n%d\n", id) - } - - n := a.nodes[id] - - // Difference propagation. - delta.Difference(&n.solve.pts.Sparse, &n.solve.prevPTS.Sparse) - if delta.IsEmpty() { - continue - } - if a.log != nil { - fmt.Fprintf(a.log, "\t\tpts(n%d : %s) = %s + %s\n", - id, n.typ, &delta, &n.solve.prevPTS) - } - n.solve.prevPTS.Copy(&n.solve.pts.Sparse) - - // Apply all resolution rules attached to n. - a.solveConstraints(n, &delta) - - if a.log != nil { - fmt.Fprintf(a.log, "\t\tpts(n%d) = %s\n", id, &n.solve.pts) - } - } - - if !a.nodes[0].solve.pts.IsEmpty() { - panic(fmt.Sprintf("pts(0) is nonempty: %s", &a.nodes[0].solve.pts)) - } - - // Release working state (but keep final PTS). - for _, n := range a.nodes { - n.solve.complex = nil - n.solve.copyTo.Clear() - n.solve.prevPTS.Clear() - } - - if a.log != nil { - fmt.Fprintf(a.log, "Solver done\n") - - // Dump solution. - for i, n := range a.nodes { - if !n.solve.pts.IsEmpty() { - fmt.Fprintf(a.log, "pts(n%d) = %s : %s\n", i, &n.solve.pts, n.typ) - } - } - } - stop("Solving") -} - -// processNewConstraints takes the new constraints from a.constraints -// and adds them to the graph, ensuring -// that new constraints are applied to pre-existing labels and -// that pre-existing constraints are applied to new labels. -func (a *analysis) processNewConstraints() { - // Take the slice of new constraints. - // (May grow during call to solveConstraints.) - constraints := a.constraints - a.constraints = nil - - // Initialize points-to sets from addr-of (base) constraints. - for _, c := range constraints { - if c, ok := c.(*addrConstraint); ok { - dst := a.nodes[c.dst] - dst.solve.pts.add(c.src) - - // Populate the worklist with nodes that point to - // something initially (due to addrConstraints) and - // have other constraints attached. - // (A no-op in round 1.) - if !dst.solve.copyTo.IsEmpty() || len(dst.solve.complex) > 0 { - a.addWork(c.dst) - } - } - } - - // Attach simple (copy) and complex constraints to nodes. - var stale nodeset - for _, c := range constraints { - var id nodeid - switch c := c.(type) { - case *addrConstraint: - // base constraints handled in previous loop - continue - case *copyConstraint: - // simple (copy) constraint - id = c.src - a.nodes[id].solve.copyTo.add(c.dst) - default: - // complex constraint - id = c.ptr() - solve := a.nodes[id].solve - solve.complex = append(solve.complex, c) - } - - if n := a.nodes[id]; !n.solve.pts.IsEmpty() { - if !n.solve.prevPTS.IsEmpty() { - stale.add(id) - } - a.addWork(id) - } - } - // Apply new constraints to pre-existing PTS labels. 
- var space [50]int - for _, id := range stale.AppendTo(space[:0]) { - n := a.nodes[nodeid(id)] - a.solveConstraints(n, &n.solve.prevPTS) - } -} - -// solveConstraints applies each resolution rule attached to node n to -// the set of labels delta. It may generate new constraints in -// a.constraints. -func (a *analysis) solveConstraints(n *node, delta *nodeset) { - if delta.IsEmpty() { - return - } - - // Process complex constraints dependent on n. - for _, c := range n.solve.complex { - if a.log != nil { - fmt.Fprintf(a.log, "\t\tconstraint %s\n", c) - } - c.solve(a, delta) - } - - // Process copy constraints. - var copySeen nodeset - for _, x := range n.solve.copyTo.AppendTo(a.deltaSpace) { - mid := nodeid(x) - if copySeen.add(mid) { - if a.nodes[mid].solve.pts.addAll(delta) { - a.addWork(mid) - } - } - } -} - -// addLabel adds label to the points-to set of ptr and reports whether the set grew. -func (a *analysis) addLabel(ptr, label nodeid) bool { - b := a.nodes[ptr].solve.pts.add(label) - if b && a.log != nil { - fmt.Fprintf(a.log, "\t\tpts(n%d) += n%d\n", ptr, label) - } - return b -} - -func (a *analysis) addWork(id nodeid) { - a.work.Insert(int(id)) - if a.log != nil { - fmt.Fprintf(a.log, "\t\twork: n%d\n", id) - } -} - -// onlineCopy adds a copy edge. It is called online, i.e. during -// solving, so it adds edges and pts members directly rather than by -// instantiating a 'constraint'. -// -// The size of the copy is implicitly 1. -// It returns true if pts(dst) changed. -func (a *analysis) onlineCopy(dst, src nodeid) bool { - if dst != src { - if nsrc := a.nodes[src]; nsrc.solve.copyTo.add(dst) { - if a.log != nil { - fmt.Fprintf(a.log, "\t\t\tdynamic copy n%d <- n%d\n", dst, src) - } - // TODO(adonovan): most calls to onlineCopy - // are followed by addWork, possibly batched - // via a 'changed' flag; see if there's a - // noticeable penalty to calling addWork here. - return a.nodes[dst].solve.pts.addAll(&nsrc.solve.pts) - } - } - return false -} - -// Returns sizeof. -// Implicitly adds nodes to worklist. -// -// TODO(adonovan): now that we support a.copy() during solving, we -// could eliminate onlineCopyN, but it's much slower. Investigate. -func (a *analysis) onlineCopyN(dst, src nodeid, sizeof uint32) uint32 { - for i := uint32(0); i < sizeof; i++ { - if a.onlineCopy(dst, src) { - a.addWork(dst) - } - src++ - dst++ - } - return sizeof -} - -func (c *loadConstraint) solve(a *analysis, delta *nodeset) { - var changed bool - for _, x := range delta.AppendTo(a.deltaSpace) { - k := nodeid(x) - koff := k + nodeid(c.offset) - if a.onlineCopy(c.dst, koff) { - changed = true - } - } - if changed { - a.addWork(c.dst) - } -} - -func (c *storeConstraint) solve(a *analysis, delta *nodeset) { - for _, x := range delta.AppendTo(a.deltaSpace) { - k := nodeid(x) - koff := k + nodeid(c.offset) - if a.onlineCopy(koff, c.src) { - a.addWork(koff) - } - } -} - -func (c *offsetAddrConstraint) solve(a *analysis, delta *nodeset) { - dst := a.nodes[c.dst] - for _, x := range delta.AppendTo(a.deltaSpace) { - k := nodeid(x) - if dst.solve.pts.add(k + nodeid(c.offset)) { - a.addWork(c.dst) - } - } -} - -func (c *typeFilterConstraint) solve(a *analysis, delta *nodeset) { - for _, x := range delta.AppendTo(a.deltaSpace) { - ifaceObj := nodeid(x) - tDyn, _, indirect := a.taggedValue(ifaceObj) - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. 
- panic("indirect tagged object") - } - - if types.AssignableTo(tDyn, c.typ) { - if a.addLabel(c.dst, ifaceObj) { - a.addWork(c.dst) - } - } - } -} - -func (c *untagConstraint) solve(a *analysis, delta *nodeset) { - predicate := types.AssignableTo - if c.exact { - predicate = types.Identical - } - for _, x := range delta.AppendTo(a.deltaSpace) { - ifaceObj := nodeid(x) - tDyn, v, indirect := a.taggedValue(ifaceObj) - if indirect { - // TODO(adonovan): we'll need to implement this - // when we start creating indirect tagged objects. - panic("indirect tagged object") - } - - if predicate(tDyn, c.typ) { - // Copy payload sans tag to dst. - // - // TODO(adonovan): opt: if tDyn is - // nonpointerlike we can skip this entire - // constraint, perhaps. We only care about - // pointers among the fields. - a.onlineCopyN(c.dst, v, a.sizeof(tDyn)) - } - } -} - -func (c *invokeConstraint) solve(a *analysis, delta *nodeset) { - for _, x := range delta.AppendTo(a.deltaSpace) { - ifaceObj := nodeid(x) - tDyn, v, indirect := a.taggedValue(ifaceObj) - if indirect { - // TODO(adonovan): we may need to implement this if - // we ever apply invokeConstraints to reflect.Value PTSs, - // e.g. for (reflect.Value).Call. - panic("indirect tagged object") - } - - // Look up the concrete method. - fn := a.prog.LookupMethod(tDyn, c.method.Pkg(), c.method.Name()) - if fn == nil { - panic(fmt.Sprintf("n%d: no ssa.Function for %s", c.iface, c.method)) - } - sig := fn.Signature - - fnObj := a.globalobj[fn] // dynamic calls use shared contour - if fnObj == 0 { - // a.objectNode(fn) was not called during gen phase. - panic(fmt.Sprintf("a.globalobj[%s]==nil", fn)) - } - - // Make callsite's fn variable point to identity of - // concrete method. (There's no need to add it to - // worklist since it never has attached constraints.) - a.addLabel(c.params, fnObj) - - // Extract value and connect to method's receiver. - // Copy payload to method's receiver param (arg0). - arg0 := a.funcParams(fnObj) - recvSize := a.sizeof(sig.Recv().Type()) - a.onlineCopyN(arg0, v, recvSize) - - src := c.params + 1 // skip past identity - dst := arg0 + nodeid(recvSize) - - // Copy caller's argument block to method formal parameters. - paramsSize := a.sizeof(sig.Params()) - a.onlineCopyN(dst, src, paramsSize) - src += nodeid(paramsSize) - dst += nodeid(paramsSize) - - // Copy method results to caller's result block. - resultsSize := a.sizeof(sig.Results()) - a.onlineCopyN(src, dst, resultsSize) - } -} - -func (c *addrConstraint) solve(a *analysis, delta *nodeset) { - panic("addr is not a complex constraint") -} - -func (c *copyConstraint) solve(a *analysis, delta *nodeset) { - panic("copy is not a complex constraint") -} diff --git a/go/pointer/stdlib_test.go b/go/pointer/stdlib_test.go deleted file mode 100644 index 978cfb8fed1..00000000000 --- a/go/pointer/stdlib_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Incomplete source tree on Android. - -//go:build !android -// +build !android - -package pointer - -// This file runs the pointer analysis on all packages and tests beneath -// $GOROOT. It provides a "smoke test" that the analysis doesn't crash -// on a large input, and a benchmark for performance measurement. 
-// -// Because it is relatively slow, the --stdlib flag must be enabled for -// this test to run: -// % go test -v golang.org/x/tools/go/pointer --stdlib - -import ( - "flag" - "go/token" - "testing" - "time" - - "golang.org/x/tools/go/packages" - "golang.org/x/tools/go/ssa" - "golang.org/x/tools/go/ssa/ssautil" -) - -var runStdlibTest = flag.Bool("stdlib", false, "Run the (slow) stdlib test") - -func TestStdlib(t *testing.T) { - if !*runStdlibTest { - t.Skip("skipping (slow) stdlib test (use --stdlib)") - } - - cfg := &packages.Config{ - Mode: packages.LoadAllSyntax, - // Create test main packages with a main function. - Tests: true, - } - pkgs, err := packages.Load(cfg, "std") - if err != nil || packages.PrintErrors(pkgs) > 0 { - t.Fatalf("Load failed: %v", err) - } - - // Create SSA packages. - prog, _ := ssautil.AllPackages(pkgs, ssa.InstantiateGenerics) - prog.Build() - - numPkgs := len(prog.AllPackages()) - if want := 240; numPkgs < want { - t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want) - } - - // Determine the set of packages/tests to analyze. - var mains []*ssa.Package - for _, ssapkg := range prog.AllPackages() { - if ssapkg.Pkg.Name() == "main" && ssapkg.Func("main") != nil { - mains = append(mains, ssapkg) - } - } - if mains == nil { - t.Fatal("no tests found in analysis scope") - } - - // Run the analysis. - config := &Config{ - Reflection: false, // TODO(adonovan): fix remaining bug in rVCallConstraint, then enable. - BuildCallGraph: true, - Mains: mains, - } - // TODO(adonovan): add some query values (affects track bits). - - t0 := time.Now() - - result, err := Analyze(config) - if err != nil { - t.Fatal(err) // internal error in pointer analysis - } - _ = result // TODO(adonovan): measure something - - t1 := time.Now() - - // Dump some statistics. - allFuncs := ssautil.AllFunctions(prog) - var numInstrs int - for fn := range allFuncs { - for _, b := range fn.Blocks { - numInstrs += len(b.Instrs) - } - } - - // determine line count - var lineCount int - prog.Fset.Iterate(func(f *token.File) bool { - lineCount += f.LineCount() - return true - }) - - t.Log("#Source lines: ", lineCount) - t.Log("#Instructions: ", numInstrs) - t.Log("Pointer analysis: ", t1.Sub(t0)) -} diff --git a/go/pointer/testdata/a_test.go b/go/pointer/testdata/a_test.go deleted file mode 100644 index c6058a0d2a2..00000000000 --- a/go/pointer/testdata/a_test.go +++ /dev/null @@ -1,44 +0,0 @@ -//go:build ignore -// +build ignore - -package a - -// This test exercises the synthesis of testmain packages for tests. -// The test framework doesn't directly let us perform negative -// assertions (i.e. that TestingQuux isn't called, or that its -// parameter's PTS is empty) so this test is rather roundabout. - -import "testing" - -func log(f func(*testing.T)) { - // The PTS of f is the set of called tests. TestingQuux is not present. - print(f) // @pointsto command-line-arguments.Test | command-line-arguments.TestFoo -} - -func Test(t *testing.T) { - // Don't assert @pointsto(t) since its label contains a fragile line number. - log(Test) -} - -func TestFoo(t *testing.T) { - // Don't assert @pointsto(t) since its label contains a fragile line number. - log(TestFoo) -} - -func TestingQuux(t *testing.T) { - // We can't assert @pointsto(t) since this is dead code. - log(TestingQuux) -} - -func BenchmarkFoo(b *testing.B) { -} - -func ExampleBar() { - // Output: -} - -// Excludes TestingQuux. 
-// @calls testing.tRunner -> command-line-arguments.Test -// @calls testing.tRunner -> command-line-arguments.TestFoo -// @calls (*testing.B).runN -> command-line-arguments.BenchmarkFoo -// @calls testing.runExample -> command-line-arguments.ExampleBar diff --git a/go/pointer/testdata/another.go b/go/pointer/testdata/another.go deleted file mode 100644 index 75b92c523a8..00000000000 --- a/go/pointer/testdata/another.go +++ /dev/null @@ -1,37 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -var unknown bool - -type S string - -func incr(x int) int { return x + 1 } - -func main() { - var i interface{} - i = 1 - if unknown { - i = S("foo") - } - if unknown { - i = (func(int, int))(nil) // NB type compares equal to that below. - } - // Look, the test harness can handle equal-but-not-String-equal - // types because we parse types and using a typemap. - if unknown { - i = (func(x int, y int))(nil) - } - if unknown { - i = incr - } - print(i) // @types int | S | func(int, int) | func(int) int - - // NB, an interface may never directly alias any global - // labels, even though it may contain pointers that do. - print(i) // @pointsto makeinterface:func(x int) int | makeinterface:func(x int, y int) | makeinterface:func(int, int) | makeinterface:int | makeinterface:command-line-arguments.S - print(i.(func(int) int)) // @pointsto command-line-arguments.incr - - print() // regression test for crash -} diff --git a/go/pointer/testdata/arrayreflect.go b/go/pointer/testdata/arrayreflect.go deleted file mode 100644 index 18c8707f68e..00000000000 --- a/go/pointer/testdata/arrayreflect.go +++ /dev/null @@ -1,192 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -// Test of arrays & slices with reflection. - -import "reflect" - -var a, b int - -type S string - -func reflectValueSlice() { - // reflect.Value contains a slice. - slice := make([]*int, 10) // @line slice - slice[0] = &a - rvsl := reflect.ValueOf(slice).Slice(0, 0) - print(rvsl.Interface()) // @types []*int - print(rvsl.Interface().([]*int)) // @pointsto makeslice@slice:15 - print(rvsl.Interface().([]*int)[42]) // @pointsto command-line-arguments.a - - // reflect.Value contains an array (non-addressable). - array := [10]*int{&a} // @line array - rvarray := reflect.ValueOf(array).Slice(0, 0) - print(rvarray.Interface()) // @types - print(rvarray.Interface().([]*int)) // @pointsto - print(rvarray.Interface().([]*int)[42]) // @pointsto - - // reflect.Value contains a pointer-to-array - rvparray := reflect.ValueOf(&array).Slice(0, 0) - print(rvparray.Interface()) // @types []*int - print(rvparray.Interface().([]*int)) // @pointsto array@array:2 - print(rvparray.Interface().([]*int)[42]) // @pointsto command-line-arguments.a - - // reflect.Value contains a string. - rvstring := reflect.ValueOf("hi").Slice(0, 0) - print(rvstring.Interface()) // @types string - - // reflect.Value contains a (named) string type. - rvS := reflect.ValueOf(S("hi")).Slice(0, 0) - print(rvS.Interface()) // @types S - - // reflect.Value contains a non-array pointer. - rvptr := reflect.ValueOf(new(int)).Slice(0, 0) - print(rvptr.Interface()) // @types - - // reflect.Value contains a non-string basic type. 
- rvint := reflect.ValueOf(3).Slice(0, 0) - print(rvint.Interface()) // @types -} - -func reflectValueBytes() { - sl1 := make([]byte, 0) // @line ar5sl1 - sl2 := make([]byte, 0) // @line ar5sl2 - - rvsl1 := reflect.ValueOf(sl1) - print(rvsl1.Interface()) // @types []byte - print(rvsl1.Interface().([]byte)) // @pointsto makeslice@ar5sl1:13 - print(rvsl1.Bytes()) // @pointsto makeslice@ar5sl1:13 - - rvsl2 := reflect.ValueOf(123) - rvsl2.SetBytes(sl2) - print(rvsl2.Interface()) // @types int - print(rvsl2.Interface().([]byte)) // @pointsto - print(rvsl2.Bytes()) // @pointsto - - rvsl3 := reflect.ValueOf([]byte(nil)) - rvsl3.SetBytes(sl2) - print(rvsl3.Interface()) // @types []byte - print(rvsl3.Interface().([]byte)) // @pointsto makeslice@ar5sl2:13 - print(rvsl3.Bytes()) // @pointsto makeslice@ar5sl2:13 -} - -func reflectValueIndex() { - slice := []*int{&a} // @line ar6slice - rv1 := reflect.ValueOf(slice) - print(rv1.Index(42).Interface()) // @types *int - print(rv1.Index(42).Interface().(*int)) // @pointsto command-line-arguments.a - - array := [10]*int{&a} - rv2 := reflect.ValueOf(array) - print(rv2.Index(42).Interface()) // @types *int - print(rv2.Index(42).Interface().(*int)) // @pointsto command-line-arguments.a - - rv3 := reflect.ValueOf("string") - print(rv3.Index(42).Interface()) // @types rune - - rv4 := reflect.ValueOf(&array) - print(rv4.Index(42).Interface()) // @types - - rv5 := reflect.ValueOf(3) - print(rv5.Index(42).Interface()) // @types -} - -func reflectValueElem() { - // Interface. - var iface interface{} = &a - rv1 := reflect.ValueOf(&iface).Elem() - print(rv1.Interface()) // @types *int - print(rv1.Interface().(*int)) // @pointsto command-line-arguments.a - print(rv1.Elem().Interface()) // @types *int - print(rv1.Elem().Interface().(*int)) // @pointsto command-line-arguments.a - - print(reflect.ValueOf(new(interface{})).Elem().Elem()) // @types - - // Pointer. - ptr := &a - rv2 := reflect.ValueOf(&ptr) - print(rv2.Elem().Interface()) // @types *int - print(rv2.Elem().Interface().(*int)) // @pointsto command-line-arguments.a - - // No other type works with (rV).Elem, not even those that - // work with (rT).Elem: slice, array, map, chan. 
- - rv3 := reflect.ValueOf([]*int{&a}) - print(rv3.Elem().Interface()) // @types - - rv4 := reflect.ValueOf([10]*int{&a}) - print(rv4.Elem().Interface()) // @types - - rv5 := reflect.ValueOf(map[*int]*int{&a: &b}) - print(rv5.Elem().Interface()) // @types - - ch := make(chan *int) - ch <- &a - rv6 := reflect.ValueOf(ch) - print(rv6.Elem().Interface()) // @types - - rv7 := reflect.ValueOf(3) - print(rv7.Elem().Interface()) // @types -} - -func reflectTypeElem() { - rt1 := reflect.TypeOf(make([]*int, 0)) - print(reflect.Zero(rt1.Elem())) // @types *int - - rt2 := reflect.TypeOf([10]*int{}) - print(reflect.Zero(rt2.Elem())) // @types *int - - rt3 := reflect.TypeOf(map[*int]*int{}) - print(reflect.Zero(rt3.Elem())) // @types *int - - rt4 := reflect.TypeOf(make(chan *int)) - print(reflect.Zero(rt4.Elem())) // @types *int - - ptr := &a - rt5 := reflect.TypeOf(&ptr) - print(reflect.Zero(rt5.Elem())) // @types *int - - rt6 := reflect.TypeOf(3) - print(reflect.Zero(rt6.Elem())) // @types -} - -func reflectPtrTo() { - tInt := reflect.TypeOf(3) - tPtrInt := reflect.PtrTo(tInt) - print(reflect.Zero(tPtrInt)) // @types *int - tPtrPtrInt := reflect.PtrTo(tPtrInt) - print(reflect.Zero(tPtrPtrInt)) // @types **int -} - -func reflectSliceOf() { - tInt := reflect.TypeOf(3) - tSliceInt := reflect.SliceOf(tInt) - print(reflect.Zero(tSliceInt)) // @types []int -} - -type T struct{ x int } - -func reflectMakeSlice() { - rt := []reflect.Type{ - reflect.TypeOf(3), - reflect.TypeOf([]int{}), - reflect.TypeOf([]T{}), - }[0] - sl := reflect.MakeSlice(rt, 0, 0) - print(sl) // @types []int | []T - print(sl) // @pointsto | - print(&sl.Interface().([]T)[0].x) // @pointsto [*].x -} - -func main() { - reflectValueSlice() - reflectValueBytes() - reflectValueIndex() - reflectValueElem() - reflectTypeElem() - reflectPtrTo() - reflectSliceOf() - reflectMakeSlice() -} diff --git a/go/pointer/testdata/arrays.go b/go/pointer/testdata/arrays.go deleted file mode 100644 index 96498f512e8..00000000000 --- a/go/pointer/testdata/arrays.go +++ /dev/null @@ -1,98 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -var unknown bool // defeat dead-code elimination - -var a, b int - -func array1() { - sliceA := make([]*int, 10) // @line a1make - sliceA[0] = &a - - var sliceB []*int - sliceB = append(sliceB, &b) // @line a1append - - print(sliceA) // @pointsto makeslice@a1make:16 - print(sliceA[0]) // @pointsto command-line-arguments.a - - print(sliceB) // @pointsto append@a1append:17 - print(sliceB[100]) // @pointsto command-line-arguments.b -} - -func array2() { - sliceA := make([]*int, 10) // @line a2make - sliceA[0] = &a - - sliceB := sliceA[:] - - print(sliceA) // @pointsto makeslice@a2make:16 - print(sliceA[0]) // @pointsto command-line-arguments.a - - print(sliceB) // @pointsto makeslice@a2make:16 - print(sliceB[0]) // @pointsto command-line-arguments.a -} - -func array3() { - a := []interface{}{"", 1} - b := []interface{}{true, func() {}} - print(a[0]) // @types string | int - print(b[0]) // @types bool | func() -} - -// Test of append, copy, slice. 
-func array4() { - var s2 struct { // @line a4L0 - a [3]int - b struct{ c, d int } - } - var sl1 = make([]*int, 10) // @line a4make - var someint int // @line a4L1 - sl1[1] = &someint - sl2 := append(sl1, &s2.a[1]) // @line a4append1 - print(sl1) // @pointsto makeslice@a4make:16 - print(sl2) // @pointsto append@a4append1:15 | makeslice@a4make:16 - print(sl1[0]) // @pointsto someint@a4L1:6 | s2.a[*]@a4L0:6 - print(sl2[0]) // @pointsto someint@a4L1:6 | s2.a[*]@a4L0:6 - - // In z=append(x,y) we should observe flow from y[*] to x[*]. - var sl3 = make([]*int, 10) // @line a4L2 - _ = append(sl3, &s2.a[1]) - print(sl3) // @pointsto makeslice@a4L2:16 - print(sl3[0]) // @pointsto s2.a[*]@a4L0:6 - - var sl4 = []*int{&a} // @line a4L3 - sl4a := append(sl4) // @line a4L4 - print(sl4a) // @pointsto slicelit@a4L3:18 | append@a4L4:16 - print(&sl4a[0]) // @pointsto slicelit[*]@a4L3:18 | append[*]@a4L4:16 - print(sl4a[0]) // @pointsto command-line-arguments.a - - var sl5 = []*int{&b} // @line a4L5 - copy(sl5, sl4) - print(sl5) // @pointsto slicelit@a4L5:18 - print(&sl5[0]) // @pointsto slicelit[*]@a4L5:18 - print(sl5[0]) // @pointsto command-line-arguments.b | command-line-arguments.a - - var sl6 = sl5[:0] - print(sl6) // @pointsto slicelit@a4L5:18 - print(&sl6[0]) // @pointsto slicelit[*]@a4L5:18 - print(sl6[0]) // @pointsto command-line-arguments.b | command-line-arguments.a -} - -func array5() { - var arr [2]*int - arr[0] = &a - arr[1] = &b - - var n int - print(arr[n]) // @pointsto command-line-arguments.a | command-line-arguments.b -} - -func main() { - array1() - array2() - array3() - array4() - array5() -} diff --git a/go/pointer/testdata/arrays_go117.go b/go/pointer/testdata/arrays_go117.go deleted file mode 100644 index 7ad9f5f35c4..00000000000 --- a/go/pointer/testdata/arrays_go117.go +++ /dev/null @@ -1,173 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -// Forked from arrays.go. Requires go1.17 to parse slice to array casts. -// TODO(taking): Merge back into arrays.go once we can assume go1.17. - -var unknown bool // defeat dead-code elimination - -var a, b int - -func array1() { - sliceA := make([]*int, 10) // @line a1make - sliceA[0] = &a - - var sliceB []*int - sliceB = append(sliceB, &b) // @line a1append - - print(sliceA) // @pointsto makeslice@a1make:16 - print(sliceA[0]) // @pointsto command-line-arguments.a - - print(sliceB) // @pointsto append@a1append:17 - print(sliceB[100]) // @pointsto command-line-arguments.b -} - -func array2() { - sliceA := make([]*int, 10) // @line a2make - sliceA[0] = &a - - sliceB := sliceA[:] - - print(sliceA) // @pointsto makeslice@a2make:16 - print(sliceA[0]) // @pointsto command-line-arguments.a - - print(sliceB) // @pointsto makeslice@a2make:16 - print(sliceB[0]) // @pointsto command-line-arguments.a -} - -func array3() { - a := []interface{}{"", 1} - b := []interface{}{true, func() {}} - print(a[0]) // @types string | int - print(b[0]) // @types bool | func() -} - -// Test of append, copy, slice. -func array4() { - var s2 struct { // @line a4L0 - a [3]int - b struct{ c, d int } - } - var sl1 = make([]*int, 10) // @line a4make - var someint int // @line a4L1 - sl1[1] = &someint - sl2 := append(sl1, &s2.a[1]) // @line a4append1 - print(sl1) // @pointsto makeslice@a4make:16 - print(sl2) // @pointsto append@a4append1:15 | makeslice@a4make:16 - print(sl1[0]) // @pointsto someint@a4L1:6 | s2.a[*]@a4L0:6 - print(sl2[0]) // @pointsto someint@a4L1:6 | s2.a[*]@a4L0:6 - - // In z=append(x,y) we should observe flow from y[*] to x[*]. 
- var sl3 = make([]*int, 10) // @line a4L2 - _ = append(sl3, &s2.a[1]) - print(sl3) // @pointsto makeslice@a4L2:16 - print(sl3[0]) // @pointsto s2.a[*]@a4L0:6 - - var sl4 = []*int{&a} // @line a4L3 - sl4a := append(sl4) // @line a4L4 - print(sl4a) // @pointsto slicelit@a4L3:18 | append@a4L4:16 - print(&sl4a[0]) // @pointsto slicelit[*]@a4L3:18 | append[*]@a4L4:16 - print(sl4a[0]) // @pointsto command-line-arguments.a - - var sl5 = []*int{&b} // @line a4L5 - copy(sl5, sl4) - print(sl5) // @pointsto slicelit@a4L5:18 - print(&sl5[0]) // @pointsto slicelit[*]@a4L5:18 - print(sl5[0]) // @pointsto command-line-arguments.b | command-line-arguments.a - - var sl6 = sl5[:0] - print(sl6) // @pointsto slicelit@a4L5:18 - print(&sl6[0]) // @pointsto slicelit[*]@a4L5:18 - print(sl6[0]) // @pointsto command-line-arguments.b | command-line-arguments.a -} - -func array5() { - var arr [2]*int - arr[0] = &a - arr[1] = &b - - var n int - print(arr[n]) // @pointsto command-line-arguments.a | command-line-arguments.b -} - -func array6() { - var n int - - sl0 := []*int{&a} - ap0 := (*[1]*int)(sl0) - ar0 := *ap0 - - print(ap0[n]) // @pointsto command-line-arguments.a - print(sl0[n]) // @pointsto command-line-arguments.a - print(ar0[n]) // @pointsto command-line-arguments.a - - sl1 := []*int{&a} - ap1 := (*[1]*int)(sl1) - ar1 := *ap1 - - ar1[0] = &b - print(ap1[n]) // @pointsto command-line-arguments.a - print(sl1[n]) // @pointsto command-line-arguments.a - print(ar1[n]) // @pointsto command-line-arguments.a | command-line-arguments.b - - sl2 := []*int{&a} - ap2 := (*[1]*int)(sl2) - ar2 := *ap2 - - ap2[0] = &b - print(ap2[n]) // @pointsto command-line-arguments.a | command-line-arguments.b - print(sl2[n]) // @pointsto command-line-arguments.a | command-line-arguments.b - print(ar2[n]) // @pointsto command-line-arguments.a | command-line-arguments.b - - sl3 := []*int{&b, nil} - ap3 := (*[1]*int)(sl3) - ar3 := *ap3 - - print(sl3[n]) // @pointsto command-line-arguments.b - print(ap3[n]) // @pointsto command-line-arguments.b - print(ar3[n]) // @pointsto command-line-arguments.b -} - -func array7() { - var n int - - sl0 := []*int{nil, nil, nil} - ap0 := (*[2]*int)(sl0) - ap1 := (*[1]*int)(sl0[2:]) - - ap1[0] = &a - - print(sl0[n]) // @pointsto command-line-arguments.a - print(ap0[n]) // @pointsto command-line-arguments.a - print(ap1[n]) // @pointsto command-line-arguments.a -} - -func array8() { - var n int - - sl1 := make([]*int, 1, 1) - sl2 := make([]*int, 1, 1) - pa1 := (*[1]*int)(sl1) - pa2 := (*[1]*int)(sl2) - sl1[0] = &a - sl2[0] = &b - print(pa1[n]) // @pointsto command-line-arguments.a - print(pa2[n]) // @pointsto command-line-arguments.b - - pa2 = pa1 - print(pa1[n]) // @pointsto command-line-arguments.a - print(pa2[n]) // @pointsto command-line-arguments.a -} - -func main() { - array1() - array2() - array3() - array4() - array5() - array6() - array7() - array8() -} diff --git a/go/pointer/testdata/channels.go b/go/pointer/testdata/channels.go deleted file mode 100644 index c4f5150bf4c..00000000000 --- a/go/pointer/testdata/channels.go +++ /dev/null @@ -1,119 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -func incr(x int) int { return x + 1 } - -func decr(x int) int { return x - 1 } - -var unknown bool // defeat dead-code elimination - -func chan1() { - chA := make(chan func(int) int, 0) // @line c1makeA - chB := make(chan func(int) int, 0) // @line c1makeB - chA <- incr - chB <- decr - chB <- func(int) int { return 1 } - - print(chA) // @pointsto makechan@c1makeA:13 - print(<-chA) // @pointsto 
command-line-arguments.incr - - print(chB) // @pointsto makechan@c1makeB:13 - print(<-chB) // @pointsto command-line-arguments.decr | command-line-arguments.chan1$1 -} - -func chan2() { - chA := make(chan func(int) int, 0) // @line c2makeA - chB := make(chan func(int) int, 0) // @line c2makeB - chA <- incr - chB <- decr - chB <- func(int) int { return 1 } - - // Channels flow together. - // Labelsets remain distinct but elements are merged. - chAB := chA - if unknown { - chAB = chB - } - - print(chA) // @pointsto makechan@c2makeA:13 - print(<-chA) // @pointsto command-line-arguments.incr - - print(chB) // @pointsto makechan@c2makeB:13 - print(<-chB) // @pointsto command-line-arguments.decr | command-line-arguments.chan2$1 - - print(chAB) // @pointsto makechan@c2makeA:13 | makechan@c2makeB:13 - print(<-chAB) // @pointsto command-line-arguments.incr | command-line-arguments.decr | command-line-arguments.chan2$1 - - (<-chA)(3) -} - -// @calls command-line-arguments.chan2 -> command-line-arguments.incr - -func chan3() { - chA := make(chan func(int) int, 0) // @line c3makeA - chB := make(chan func(int) int, 0) // @line c3makeB - chA <- incr - chB <- decr - chB <- func(int) int { return 1 } - print(chA) // @pointsto makechan@c3makeA:13 - print(<-chA) // @pointsto command-line-arguments.incr - print(chB) // @pointsto makechan@c3makeB:13 - print(<-chB) // @pointsto command-line-arguments.decr | command-line-arguments.chan3$1 - - (<-chA)(3) -} - -// @calls command-line-arguments.chan3 -> command-line-arguments.incr - -func chan4() { - chA := make(chan func(int) int, 0) // @line c4makeA - chB := make(chan func(int) int, 0) // @line c4makeB - - select { - case chA <- incr: - case chB <- decr: - case a := <-chA: - print(a) // @pointsto command-line-arguments.incr - case b := <-chB: - print(b) // @pointsto command-line-arguments.decr - default: - print(chA) // @pointsto makechan@c4makeA:13 - print(chB) // @pointsto makechan@c4makeB:13 - } - - for k := range chA { - print(k) // @pointsto command-line-arguments.incr - } - // Exercise constraint generation (regtest for a crash). - for range chA { - } -} - -// Multi-word channel value in select with multiple receive cases. -// (Regtest for a crash.) -func chan5() { - type T struct { - x *int - y interface{} - } - ch := make(chan T) - ch <- T{new(int), incr} // @line ch5new - select { - case a := <-ch: - print(a.x) // @pointsto new@ch5new:13 - print(a.y) // @types func(x int) int - case b := <-ch: - print(b.x) // @pointsto new@ch5new:13 - print(b.y) // @types func(x int) int - } -} - -func main() { - chan1() - chan2() - chan3() - chan4() - chan5() -} diff --git a/go/pointer/testdata/chanreflect.go b/go/pointer/testdata/chanreflect.go deleted file mode 100644 index 21f78b61fe1..00000000000 --- a/go/pointer/testdata/chanreflect.go +++ /dev/null @@ -1,86 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -import "reflect" - -// Test of channels with reflection. - -var a, b int - -func chanreflect1() { - ch := make(chan *int, 0) // @line cr1make - crv := reflect.ValueOf(ch) - crv.Send(reflect.ValueOf(&a)) - print(crv.Interface()) // @types chan *int - print(crv.Interface().(chan *int)) // @pointsto makechan@cr1make:12 - print(<-ch) // @pointsto command-line-arguments.a -} - -func chanreflect1i() { - // Exercises reflect.Value conversions to/from interfaces: - // a different code path than for concrete types. 
- ch := make(chan interface{}, 0) - reflect.ValueOf(ch).Send(reflect.ValueOf(&a)) - v := <-ch - print(v) // @types *int - print(v.(*int)) // @pointsto command-line-arguments.a -} - -func chanreflect2() { - ch := make(chan *int, 0) - ch <- &b - crv := reflect.ValueOf(ch) - r, _ := crv.Recv() - print(r.Interface()) // @types *int - print(r.Interface().(*int)) // @pointsto command-line-arguments.b -} - -func chanOfRecv() { - // MakeChan(<-chan) is a no-op. - t := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(&a)) - print(reflect.Zero(t).Interface()) // @types <-chan *int - print(reflect.MakeChan(t, 0).Interface().(<-chan *int)) // @pointsto - print(reflect.MakeChan(t, 0).Interface().(chan *int)) // @pointsto -} - -func chanOfSend() { - // MakeChan(chan<-) is a no-op. - t := reflect.ChanOf(reflect.SendDir, reflect.TypeOf(&a)) - print(reflect.Zero(t).Interface()) // @types chan<- *int - print(reflect.MakeChan(t, 0).Interface().(chan<- *int)) // @pointsto - print(reflect.MakeChan(t, 0).Interface().(chan *int)) // @pointsto -} - -func chanOfBoth() { - t := reflect.ChanOf(reflect.BothDir, reflect.TypeOf(&a)) - print(reflect.Zero(t).Interface()) // @types chan *int - ch := reflect.MakeChan(t, 0) - print(ch.Interface().(chan *int)) // @pointsto - ch.Send(reflect.ValueOf(&b)) - ch.Interface().(chan *int) <- &a - r, _ := ch.Recv() - print(r.Interface().(*int)) // @pointsto command-line-arguments.a | command-line-arguments.b - print(<-ch.Interface().(chan *int)) // @pointsto command-line-arguments.a | command-line-arguments.b -} - -var unknownDir reflect.ChanDir // not a constant - -func chanOfUnknown() { - // Unknown channel direction: assume all three. - // MakeChan only works on the bi-di channel type. - t := reflect.ChanOf(unknownDir, reflect.TypeOf(&a)) - print(reflect.Zero(t).Interface()) // @types <-chan *int | chan<- *int | chan *int - print(reflect.MakeChan(t, 0).Interface()) // @types chan *int -} - -func main() { - chanreflect1() - chanreflect1i() - chanreflect2() - chanOfRecv() - chanOfSend() - chanOfBoth() - chanOfUnknown() -} diff --git a/go/pointer/testdata/chanreflect1.go b/go/pointer/testdata/chanreflect1.go deleted file mode 100644 index c5e25874333..00000000000 --- a/go/pointer/testdata/chanreflect1.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build ignore - -package main - -import "reflect" - -// -// This test is very sensitive to line-number perturbations! - -// Test of channels with reflection. - -var a, b int - -func chanreflect1() { - ch := make(chan *int, 0) - crv := reflect.ValueOf(ch) - crv.Send(reflect.ValueOf(&a)) - print(crv.Interface()) // @types chan *int - print(crv.Interface().(chan *int)) // @pointsto makechan@testdata/chanreflect.go:15:12 - print(<-ch) // @pointsto main.a -} - -func chanreflect2() { - ch := make(chan *int, 0) - ch <- &b - crv := reflect.ValueOf(ch) - r, _ := crv.Recv() - print(r.Interface()) // @types *int - print(r.Interface().(*int)) // @pointsto main.b -} - -func main() { - chanreflect1() - chanreflect2() -} diff --git a/go/pointer/testdata/context.go b/go/pointer/testdata/context.go deleted file mode 100644 index b76c2007013..00000000000 --- a/go/pointer/testdata/context.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -// Test of context-sensitive treatment of certain function calls, -// e.g. static calls to simple accessor methods. 
- -var a, b int - -type T struct{ x *int } - -func (t *T) SetX(x *int) { t.x = x } -func (t *T) GetX() *int { return t.x } - -func context1() { - var t1, t2 T - t1.SetX(&a) - t2.SetX(&b) - print(t1.GetX()) // @pointsto command-line-arguments.a - print(t2.GetX()) // @pointsto command-line-arguments.b -} - -func context2() { - id := func(x *int) *int { - print(x) // @pointsto command-line-arguments.a | command-line-arguments.b - return x - } - print(id(&a)) // @pointsto command-line-arguments.a - print(id(&b)) // @pointsto command-line-arguments.b - - // Same again, but anon func has free vars. - var c int // @line context2c - id2 := func(x *int) (*int, *int) { - print(x) // @pointsto command-line-arguments.a | command-line-arguments.b - return x, &c - } - p, q := id2(&a) - print(p) // @pointsto command-line-arguments.a - print(q) // @pointsto c@context2c:6 - r, s := id2(&b) - print(r) // @pointsto command-line-arguments.b - print(s) // @pointsto c@context2c:6 -} - -func main() { - context1() - context2() -} diff --git a/go/pointer/testdata/conv.go b/go/pointer/testdata/conv.go deleted file mode 100644 index 5ef1fdf0135..00000000000 --- a/go/pointer/testdata/conv.go +++ /dev/null @@ -1,64 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -import "unsafe" - -var a int - -func conv1() { - // Conversions of channel direction. - ch := make(chan int) // @line c1make - print((<-chan int)(ch)) // @pointsto makechan@c1make:12 - print((chan<- int)(ch)) // @pointsto makechan@c1make:12 -} - -func conv2() { - // string -> []byte/[]rune conversion - s := "foo" - ba := []byte(s) // @line c2ba - ra := []rune(s) // @line c2ra - print(ba) // @pointsto convert@c2ba:14 - print(ra) // @pointsto convert@c2ra:14 -} - -func conv3() { - // Conversion of same underlying types. - type PI *int - pi := PI(&a) - print(pi) // @pointsto command-line-arguments.a - - pint := (*int)(pi) - print(pint) // @pointsto command-line-arguments.a - - // Conversions between pointers to identical base types. - var y *PI = &pi - var x **int = (**int)(y) - print(*x) // @pointsto command-line-arguments.a - print(*y) // @pointsto command-line-arguments.a - y = (*PI)(x) - print(*y) // @pointsto command-line-arguments.a -} - -func conv4() { - // Handling of unsafe.Pointer conversion is unsound: - // we lose the alias to command-line-arguments.a and get something like new(int) instead. - p := (*int)(unsafe.Pointer(&a)) // @line c2p - print(p) // @pointsto convert@c2p:13 -} - -// Regression test for b/8231. 
-func conv5() { - type P unsafe.Pointer - var i *struct{} - _ = P(i) -} - -func main() { - conv1() - conv2() - conv3() - conv4() - conv5() -} diff --git a/go/pointer/testdata/extended.go b/go/pointer/testdata/extended.go deleted file mode 100644 index a95449c2efe..00000000000 --- a/go/pointer/testdata/extended.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -var a int - -type t struct { - a *map[string]chan *int -} - -func fn() []t { - m := make(map[string]chan *int) - m[""] = make(chan *int, 1) - m[""] <- &a - return []t{t{a: &m}} -} - -func main() { - x := fn() - print(x) // @pointstoquery <-(*x[i].a)[key] command-line-arguments.a -} diff --git a/go/pointer/testdata/finalizer.go b/go/pointer/testdata/finalizer.go deleted file mode 100644 index 7ee03da0743..00000000000 --- a/go/pointer/testdata/finalizer.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import "runtime" - -func final1a(x *int) int { - print(x) // @pointsto new@newint:10 - return *x -} - -func final1b(x *bool) { - print(x) // @pointsto -} - -func runtimeSetFinalizer1() { - x := new(int) // @line newint - runtime.SetFinalizer(x, final1a) // ok: final1a's result is ignored - runtime.SetFinalizer(x, final1b) // param type mismatch: no effect -} - -// @calls command-line-arguments.runtimeSetFinalizer1 -> command-line-arguments.final1a -// @calls command-line-arguments.runtimeSetFinalizer1 -> command-line-arguments.final1b - -func final2a(x *bool) { - print(x) // @pointsto new@newbool1:10 | new@newbool2:10 -} - -func final2b(x *bool) { - print(x) // @pointsto new@newbool1:10 | new@newbool2:10 -} - -func runtimeSetFinalizer2() { - x := new(bool) // @line newbool1 - f := final2a - if unknown { - x = new(bool) // @line newbool2 - f = final2b - } - runtime.SetFinalizer(x, f) -} - -// @calls command-line-arguments.runtimeSetFinalizer2 -> command-line-arguments.final2a -// @calls command-line-arguments.runtimeSetFinalizer2 -> command-line-arguments.final2b - -type T int - -func (t *T) finalize() { - print(t) // @pointsto new@final3:10 -} - -func runtimeSetFinalizer3() { - x := new(T) // @line final3 - runtime.SetFinalizer(x, (*T).finalize) -} - -// @calls command-line-arguments.runtimeSetFinalizer3 -> (*command-line-arguments.T).finalize$thunk - -// I hope I never live to see this code in the wild. -var setFinalizer = runtime.SetFinalizer - -func final4(x *int) { - print(x) // @pointsto new@finalIndirect:10 -} - -func runtimeSetFinalizerIndirect() { - // In an indirect call, the shared contour for SetFinalizer is - // used, i.e. the call is not inlined and appears in the call graph. - x := new(int) // @line finalIndirect - setFinalizer(x, final4) -} - -// Exercise the elimination of SetFinalizer -// constraints with non-pointer operands. 
-func runtimeSetFinalizerNonpointer() { - runtime.SetFinalizer(nil, (*T).finalize) // x is a non-pointer - runtime.SetFinalizer((*T).finalize, nil) // f is a non-pointer -} - -// @calls command-line-arguments.runtimeSetFinalizerIndirect -> runtime.SetFinalizer -// @calls runtime.SetFinalizer -> command-line-arguments.final4 - -func main() { - runtimeSetFinalizer1() - runtimeSetFinalizer2() - runtimeSetFinalizer3() - runtimeSetFinalizerIndirect() - runtimeSetFinalizerNonpointer() -} - -var unknown bool // defeat dead-code elimination diff --git a/go/pointer/testdata/flow.go b/go/pointer/testdata/flow.go deleted file mode 100644 index 9e8ce9355f5..00000000000 --- a/go/pointer/testdata/flow.go +++ /dev/null @@ -1,64 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -// Demonstration of directionality of flow edges. - -func f1() {} -func f2() {} - -var somepred bool - -// Tracking functions. -func flow1() { - s := f1 - p := f2 - q := p - r := q - if somepred { - r = s - } - print(s) // @pointsto command-line-arguments.f1 - print(p) // @pointsto command-line-arguments.f2 - print(q) // @pointsto command-line-arguments.f2 - print(r) // @pointsto command-line-arguments.f1 | command-line-arguments.f2 -} - -// Tracking concrete types in interfaces. -func flow2() { - var s interface{} = 1 - var p interface{} = "foo" - q := p - r := q - if somepred { - r = s - } - print(s) // @types int - print(p) // @types string - print(q) // @types string - print(r) // @types int | string -} - -var g1, g2 int - -// Tracking addresses of globals. -func flow3() { - s := &g1 - p := &g2 - q := p - r := q - if somepred { - r = s - } - print(s) // @pointsto command-line-arguments.g1 - print(p) // @pointsto command-line-arguments.g2 - print(q) // @pointsto command-line-arguments.g2 - print(r) // @pointsto command-line-arguments.g2 | command-line-arguments.g1 -} - -func main() { - flow1() - flow2() - flow3() -} diff --git a/go/pointer/testdata/fmtexcerpt.go b/go/pointer/testdata/fmtexcerpt.go deleted file mode 100644 index 422e31d411b..00000000000 --- a/go/pointer/testdata/fmtexcerpt.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build ignore -// +build ignore - -// This is a slice of the fmt package. - -package main - -type pp struct { - field interface{} -} - -func newPrinter() *pp { - return new(pp) -} - -func Fprintln(a ...interface{}) { - p := newPrinter() - p.doPrint(a, true, true) -} - -func Println(a ...interface{}) { - Fprintln(a...) 
-} - -func (p *pp) doPrint(a []interface{}, addspace, addnewline bool) { - print(a[0]) // @types S | string - stringer := a[0].(interface { - String() string - }) - - stringer.String() - print(stringer) // @types S -} - -type S int - -func (S) String() string { return "" } - -func main() { - Println("Hello, World!", S(0)) -} - -// @calls (*command-line-arguments.pp).doPrint -> (command-line-arguments.S).String diff --git a/go/pointer/testdata/func.go b/go/pointer/testdata/func.go deleted file mode 100644 index 11a7138821e..00000000000 --- a/go/pointer/testdata/func.go +++ /dev/null @@ -1,206 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -var a, b, c int - -var unknown bool // defeat dead-code elimination - -func func1() { - var h int // @line f1h - f := func(x *int) *int { - if unknown { - return &b - } - return x - } - - // FV(g) = {f, h} - g := func(x *int) *int { - if unknown { - return &h - } - return f(x) - } - - print(g(&a)) // @pointsto command-line-arguments.a | command-line-arguments.b | h@f1h:6 - print(f(&a)) // @pointsto command-line-arguments.a | command-line-arguments.b - print(&a) // @pointsto command-line-arguments.a -} - -// @calls command-line-arguments.func1 -> command-line-arguments.func1$2 -// @calls command-line-arguments.func1 -> command-line-arguments.func1$1 -// @calls command-line-arguments.func1$2 -> command-line-arguments.func1$1 - -func func2() { - var x, y *int - defer func() { - x = &a - }() - go func() { - y = &b - }() - print(x) // @pointsto command-line-arguments.a - print(y) // @pointsto command-line-arguments.b -} - -func func3() { - x, y := func() (x, y *int) { - x = &a - y = &b - if unknown { - return nil, &c - } - return - }() - print(x) // @pointsto command-line-arguments.a - print(y) // @pointsto command-line-arguments.b | command-line-arguments.c -} - -func swap(x, y *int) (*int, *int) { // @line swap - print(&x) // @pointsto x@swap:11 - print(x) // @pointsto makeslice[*]@func4make:11 - print(&y) // @pointsto y@swap:14 - print(y) // @pointsto j@f4j:5 - return y, x -} - -func func4() { - a := make([]int, 10) // @line func4make - i, j := 123, 456 // @line f4j - _ = i - p, q := swap(&a[3], &j) - print(p) // @pointsto j@f4j:5 - print(q) // @pointsto makeslice[*]@func4make:11 - - f := &b - print(f) // @pointsto command-line-arguments.b -} - -type T int - -func (t *T) f(x *int) *int { - print(t) // @pointsto command-line-arguments.a - print(x) // @pointsto command-line-arguments.c - return &b -} - -func (t *T) g(x *int) *int { - print(t) // @pointsto command-line-arguments.a - print(x) // @pointsto command-line-arguments.b - return &c -} - -func (t *T) h(x *int) *int { - print(t) // @pointsto command-line-arguments.a - print(x) // @pointsto command-line-arguments.b - return &c -} - -var h func(*T, *int) *int - -func func5() { - // Static call of method. - t := (*T)(&a) - print(t.f(&c)) // @pointsto command-line-arguments.b - - // Static call of method as function - print((*T).g(t, &b)) // @pointsto command-line-arguments.c - - // Dynamic call (not invoke) of method. 
- h = (*T).h - print(h(t, &b)) // @pointsto command-line-arguments.c -} - -// @calls command-line-arguments.func5 -> (*command-line-arguments.T).f -// @calls command-line-arguments.func5 -> (*command-line-arguments.T).g$thunk -// @calls command-line-arguments.func5 -> (*command-line-arguments.T).h$thunk - -func func6() { - A := &a - f := func() *int { - return A // (free variable) - } - print(f()) // @pointsto command-line-arguments.a -} - -// @calls command-line-arguments.func6 -> command-line-arguments.func6$1 - -type I interface { - f() -} - -type D struct{} - -func (D) f() {} - -func func7() { - var i I = D{} - imethodClosure := i.f - imethodClosure() - // @calls command-line-arguments.func7 -> (command-line-arguments.I).f$bound - // @calls (command-line-arguments.I).f$bound -> (command-line-arguments.D).f - - var d D - cmethodClosure := d.f - cmethodClosure() - // @calls command-line-arguments.func7 -> (command-line-arguments.D).f$bound - // @calls (command-line-arguments.D).f$bound ->(command-line-arguments.D).f - - methodExpr := D.f - methodExpr(d) - // @calls command-line-arguments.func7 -> (command-line-arguments.D).f$thunk -} - -func func8(x ...int) { - print(&x[0]) // @pointsto varargs[*]@varargs:15 -} - -type E struct { - x1, x2, x3, x4, x5 *int -} - -func (e E) f() {} - -func func9() { - // Regression test for bug reported by Jon Valdes on golang-dev, Jun 19 2014. - // The receiver of a bound method closure may be of a multi-node type, E. - // valueNode was reserving only a single node for it, so the - // nodes used by the immediately following constraints - // (e.g. param 'i') would get clobbered. - - var e E - e.x1 = &a - e.x2 = &a - e.x3 = &a - e.x4 = &a - e.x5 = &a - - _ = e.f // form a closure---must reserve sizeof(E) nodes - - func(i I) { - i.f() // must not crash the solver - }(new(D)) - - print(e.x1) // @pointsto command-line-arguments.a - print(e.x2) // @pointsto command-line-arguments.a - print(e.x3) // @pointsto command-line-arguments.a - print(e.x4) // @pointsto command-line-arguments.a - print(e.x5) // @pointsto command-line-arguments.a -} - -func main() { - func1() - func2() - func3() - func4() - func5() - func6() - func7() - func8(1, 2, 3) // @line varargs - func9() -} - -// @calls -> command-line-arguments.main -// @calls -> command-line-arguments.init diff --git a/go/pointer/testdata/funcreflect.go b/go/pointer/testdata/funcreflect.go deleted file mode 100644 index 2b4315ba555..00000000000 --- a/go/pointer/testdata/funcreflect.go +++ /dev/null @@ -1,131 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -import "reflect" - -var zero, a, b int -var false2 bool - -func f(p *int, q hasF) *int { - print(p) // @pointsto command-line-arguments.a - print(q) // @types *T - print(q.(*T)) // @pointsto new@newT1:22 - return &b -} - -func g(p *bool) (*int, *bool, hasF) { - return &b, p, new(T) // @line newT2 -} - -func reflectValueCall() { - rvf := reflect.ValueOf(f) - res := rvf.Call([]reflect.Value{ - // argument order is not significant: - reflect.ValueOf(new(T)), // @line newT1 - reflect.ValueOf(&a), - }) - print(res[0].Interface()) // @types *int - print(res[0].Interface().(*int)) // @pointsto command-line-arguments.b -} - -// @calls command-line-arguments.reflectValueCall -> command-line-arguments.f - -func reflectValueCallIndirect() { - rvf := reflect.ValueOf(g) - call := rvf.Call // kids, don't try this at home - - // Indirect call uses shared contour. 
- // - // Also notice that argument position doesn't matter, and args - // of inappropriate type (e.g. 'a') are ignored. - res := call([]reflect.Value{ - reflect.ValueOf(&a), - reflect.ValueOf(&false2), - }) - res0 := res[0].Interface() - print(res0) // @types *int | *bool | *T - print(res0.(*int)) // @pointsto command-line-arguments.b - print(res0.(*bool)) // @pointsto command-line-arguments.false2 - print(res0.(hasF)) // @types *T - print(res0.(*T)) // @pointsto new@newT2:19 -} - -// @calls command-line-arguments.reflectValueCallIndirect -> (reflect.Value).Call$bound -// @calls (reflect.Value).Call$bound -> command-line-arguments.g - -func reflectTypeInOut() { - var f func(float64, bool) (string, int) - print(reflect.Zero(reflect.TypeOf(f).In(0)).Interface()) // @types float64 - print(reflect.Zero(reflect.TypeOf(f).In(1)).Interface()) // @types bool - print(reflect.Zero(reflect.TypeOf(f).In(-1)).Interface()) // @types float64 | bool - print(reflect.Zero(reflect.TypeOf(f).In(zero)).Interface()) // @types float64 | bool - - print(reflect.Zero(reflect.TypeOf(f).Out(0)).Interface()) // @types string - print(reflect.Zero(reflect.TypeOf(f).Out(1)).Interface()) // @types int - print(reflect.Zero(reflect.TypeOf(f).Out(2)).Interface()) // @types - - print(reflect.Zero(reflect.TypeOf(3).Out(0)).Interface()) // @types -} - -type hasF interface { - F() -} - -type T struct{} - -func (T) F() {} -func (T) g(int) {} - -type U struct{} - -func (U) F(int) {} -func (U) g(string) {} - -type I interface { - f() -} - -var nonconst string - -func reflectTypeMethodByName() { - TU := reflect.TypeOf([]interface{}{T{}, U{}}[0]) - print(reflect.Zero(TU)) // @types T | U - - F, _ := TU.MethodByName("F") - print(reflect.Zero(F.Type)) // @types func(T) | func(U, int) - print(F.Func) // @pointsto (command-line-arguments.T).F | (command-line-arguments.U).F - - g, _ := TU.MethodByName("g") - print(reflect.Zero(g.Type)) // @types func(T, int) | func(U, string) - print(g.Func) // @pointsto (command-line-arguments.T).g | (command-line-arguments.U).g - - // Non-literal method names are treated less precisely. - U := reflect.TypeOf(U{}) - X, _ := U.MethodByName(nonconst) - print(reflect.Zero(X.Type)) // @types func(U, int) | func(U, string) - print(X.Func) // @pointsto (command-line-arguments.U).F | (command-line-arguments.U).g - - // Interface methods. - rThasF := reflect.TypeOf(new(hasF)).Elem() - print(reflect.Zero(rThasF)) // @types hasF - F2, _ := rThasF.MethodByName("F") - print(reflect.Zero(F2.Type)) // @types func() - print(F2.Func) // @pointsto - -} - -func reflectTypeMethod() { - m := reflect.TypeOf(T{}).Method(0) - print(reflect.Zero(m.Type)) // @types func(T) | func(T, int) - print(m.Func) // @pointsto (command-line-arguments.T).F | (command-line-arguments.T).g -} - -func main() { - reflectValueCall() - reflectValueCallIndirect() - reflectTypeInOut() - reflectTypeMethodByName() - reflectTypeMethod() -} diff --git a/go/pointer/testdata/hello.go b/go/pointer/testdata/hello.go deleted file mode 100644 index 3967cbe00ba..00000000000 --- a/go/pointer/testdata/hello.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -import ( - "fmt" - "os" -) - -type S int - -var theS S - -func (s *S) String() string { - print(s) // @pointsto command-line-arguments.theS - return "" -} - -func main() { - // os.Args is considered intrinsically allocated, - // but may also be set explicitly (e.g. on Windows), hence '...'. - print(os.Args) // @pointsto | ... 
- fmt.Println("Hello, World!", &theS) -} - -// @calls command-line-arguments.main -> fmt.Println -// @calls (*fmt.pp).handleMethods -> (*command-line-arguments.S).String diff --git a/go/pointer/testdata/interfaces.go b/go/pointer/testdata/interfaces.go deleted file mode 100644 index 2312e13edcc..00000000000 --- a/go/pointer/testdata/interfaces.go +++ /dev/null @@ -1,153 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -type I interface { - f() -} - -type C int - -func (*C) f() {} - -type D struct{ ptr *int } - -func (D) f() {} - -type E struct{} - -func (*E) f() {} - -var a, b int - -var unknown bool // defeat dead-code elimination - -func interface1() { - var i interface{} = &a - var j interface{} = D{&b} - k := j - if unknown { - k = i - } - - print(i) // @types *int - print(j) // @types D - print(k) // @types *int | D - - print(i.(*int)) // @pointsto command-line-arguments.a - print(j.(*int)) // @pointsto - print(k.(*int)) // @pointsto command-line-arguments.a - - print(i.(D).ptr) // @pointsto - print(j.(D).ptr) // @pointsto command-line-arguments.b - print(k.(D).ptr) // @pointsto command-line-arguments.b -} - -func interface2() { - var i I = (*C)(&a) - var j I = D{&a} - k := j - if unknown { - k = i - } - - print(i) // @types *C - print(j) // @types D - print(k) // @types *C | D - print(k) // @pointsto makeinterface:command-line-arguments.D | makeinterface:*command-line-arguments.C - - k.f() - // @calls command-line-arguments.interface2 -> (*command-line-arguments.C).f - // @calls command-line-arguments.interface2 -> (command-line-arguments.D).f - - print(i.(*C)) // @pointsto command-line-arguments.a - print(j.(D).ptr) // @pointsto command-line-arguments.a - print(k.(*C)) // @pointsto command-line-arguments.a - - switch x := k.(type) { - case *C: - print(x) // @pointsto command-line-arguments.a - case D: - print(x.ptr) // @pointsto command-line-arguments.a - case *E: - print(x) // @pointsto - } -} - -func interface3() { - // There should be no backflow of concrete types from the type-switch to x. - var x interface{} = 0 - print(x) // @types int - switch x.(type) { - case int: - case string: - } -} - -func interface4() { - var i interface{} = D{&a} - if unknown { - i = 123 - } - - print(i) // @types int | D - - j := i.(I) // interface narrowing type-assertion - print(j) // @types D - print(j.(D).ptr) // @pointsto command-line-arguments.a - - var l interface{} = j // interface widening assignment. - print(l) // @types D - print(l.(D).ptr) // @pointsto command-line-arguments.a - - m := j.(interface{}) // interface widening type-assertion. 
- print(m) // @types D - print(m.(D).ptr) // @pointsto command-line-arguments.a -} - -// Interface method calls and value flow: - -type J interface { - f(*int) *int -} - -type P struct { - x int -} - -func (p *P) f(pi *int) *int { - print(p) // @pointsto p@i5p:6 - print(pi) // @pointsto i@i5i:6 - return &p.x -} - -func interface5() { - var p P // @line i5p - var j J = &p - var i int // @line i5i - print(j.f(&i)) // @pointsto p.x@i5p:6 - print(&i) // @pointsto i@i5i:6 - - print(j) // @pointsto makeinterface:*command-line-arguments.P -} - -// @calls command-line-arguments.interface5 -> (*command-line-arguments.P).f - -func interface6() { - f := I.f - print(f) // @pointsto (command-line-arguments.I).f$thunk - f(new(struct{ D })) -} - -// @calls command-line-arguments.interface6 -> (command-line-arguments.I).f$thunk -// @calls (command-line-arguments.I).f$thunk -> (*struct{command-line-arguments.D}).f - -func main() { - interface1() - interface2() - interface3() - interface4() - interface5() - interface6() -} diff --git a/go/pointer/testdata/issue9002.go b/go/pointer/testdata/issue9002.go deleted file mode 100644 index b7c2c610903..00000000000 --- a/go/pointer/testdata/issue9002.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -func main() { - // Regression test for golang issue 9002. - // - // The two-result "value,ok" receive operation generated a - // too-wide constraint loading (value int, ok bool), not bool, - // from the channel. - // - // This bug manifested itself in an out-of-bounds array access - // when the makechan object was the highest-numbered node, as in - // this program. - // - // In more realistic programs it silently resulted in bogus - // constraints. - _, _ = <-make(chan int) -} diff --git a/go/pointer/testdata/mapreflect.go b/go/pointer/testdata/mapreflect.go deleted file mode 100644 index d8c1d5a89d9..00000000000 --- a/go/pointer/testdata/mapreflect.go +++ /dev/null @@ -1,118 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -// Test of maps with reflection. 
- -import "reflect" - -var a int -var b bool - -func reflectMapKeysIndex() { - m := make(map[*int]*bool) // @line mr1make - m[&a] = &b - - mrv := reflect.ValueOf(m) - print(mrv.Interface()) // @types map[*int]*bool - print(mrv.Interface().(map[*int]*bool)) // @pointsto makemap@mr1make:11 - print(mrv) // @pointsto makeinterface:map[*int]*bool - print(mrv) // @types map[*int]*bool - - keys := mrv.MapKeys() - print(keys) // @pointsto - for _, k := range keys { - print(k) // @pointsto - print(k) // @types *int - print(k.Interface()) // @types *int - print(k.Interface().(*int)) // @pointsto command-line-arguments.a - - v := mrv.MapIndex(k) - print(v.Interface()) // @types *bool - print(v.Interface().(*bool)) // @pointsto command-line-arguments.b - } -} - -func reflectSetMapIndex() { - m := make(map[*int]*bool) - mrv := reflect.ValueOf(m) - mrv.SetMapIndex(reflect.ValueOf(&a), reflect.ValueOf(&b)) - - print(m[nil]) // @pointsto command-line-arguments.b - - for _, k := range mrv.MapKeys() { - print(k.Interface()) // @types *int - print(k.Interface().(*int)) // @pointsto command-line-arguments.a - } - - tmap := reflect.TypeOf(m) - // types.EvalNode won't let us refer to non-exported types: - // print(tmap) // #@types *reflect.rtype - print(tmap) // @pointsto map[*int]*bool - - zmap := reflect.Zero(tmap) - print(zmap) // @pointsto - print(zmap.Interface()) // @pointsto - - print(tmap.Key()) // @pointsto *int - print(tmap.Elem()) // @pointsto *bool - print(reflect.Zero(tmap.Key())) // @pointsto - print(reflect.Zero(tmap.Key()).Interface()) // @pointsto - print(reflect.Zero(tmap.Key()).Interface()) // @types *int - print(reflect.Zero(tmap.Elem())) // @pointsto - print(reflect.Zero(tmap.Elem()).Interface()) // @pointsto - print(reflect.Zero(tmap.Elem()).Interface()) // @types *bool -} - -func reflectSetMapIndexInterface() { - // Exercises reflect.Value conversions to/from interfaces: - // a different code path than for concrete types. - m := make(map[interface{}]interface{}) - reflect.ValueOf(m).SetMapIndex(reflect.ValueOf(&a), reflect.ValueOf(&b)) - for k, v := range m { - print(k) // @types *int - print(k.(*int)) // @pointsto command-line-arguments.a - print(v) // @types *bool - print(v.(*bool)) // @pointsto command-line-arguments.b - } -} - -func reflectSetMapIndexAssignable() { - // SetMapIndex performs implicit assignability conversions. - type I *int - type J *int - - str := reflect.ValueOf("") - - // *int is assignable to I. - m1 := make(map[string]I) - reflect.ValueOf(m1).SetMapIndex(str, reflect.ValueOf(new(int))) // @line int - print(m1[""]) // @pointsto new@int:58 - - // I is assignable to I. - m2 := make(map[string]I) - reflect.ValueOf(m2).SetMapIndex(str, reflect.ValueOf(I(new(int)))) // @line I - print(m2[""]) // @pointsto new@I:60 - - // J is not assignable to I. 
- m3 := make(map[string]I) - reflect.ValueOf(m3).SetMapIndex(str, reflect.ValueOf(J(new(int)))) - print(m3[""]) // @pointsto -} - -func reflectMakeMap() { - t := reflect.TypeOf(map[*int]*bool(nil)) - v := reflect.MakeMap(t) - print(v) // @types map[*int]*bool - print(v) // @pointsto -} - -func main() { - reflectMapKeysIndex() - reflectSetMapIndex() - reflectSetMapIndexInterface() - reflectSetMapIndexAssignable() - reflectMakeMap() - // TODO(adonovan): reflect.MapOf(Type) -} diff --git a/go/pointer/testdata/maps.go b/go/pointer/testdata/maps.go deleted file mode 100644 index cce4a1020bb..00000000000 --- a/go/pointer/testdata/maps.go +++ /dev/null @@ -1,109 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -// Test of maps. - -var a, b, c int - -func maps1() { - m1 := map[*int]*int{&a: &b} // @line m1m1 - m2 := make(map[*int]*int) // @line m1m2 - m2[&b] = &a - - print(m1[nil]) // @pointsto command-line-arguments.b | command-line-arguments.c - print(m2[nil]) // @pointsto command-line-arguments.a - - print(m1) // @pointsto makemap@m1m1:21 - print(m2) // @pointsto makemap@m1m2:12 - - m1[&b] = &c - - for k, v := range m1 { - print(k) // @pointsto command-line-arguments.a | command-line-arguments.b - print(v) // @pointsto command-line-arguments.b | command-line-arguments.c - } - - for k, v := range m2 { - print(k) // @pointsto command-line-arguments.b - print(v) // @pointsto command-line-arguments.a - } - - // Lookup doesn't create any aliases. - print(m2[&c]) // @pointsto command-line-arguments.a - if _, ok := m2[&a]; ok { - print(m2[&c]) // @pointsto command-line-arguments.a - } -} - -func maps2() { - m1 := map[*int]*int{&a: &b} - m2 := map[*int]*int{&b: &c} - _ = []map[*int]*int{m1, m2} // (no spurious merging of m1, m2) - - print(m1[nil]) // @pointsto command-line-arguments.b - print(m2[nil]) // @pointsto command-line-arguments.c -} - -var g int - -func maps3() { - // Regression test for a constraint generation bug for map range - // loops in which the key is unused: the (ok, k, v) tuple - // returned by ssa.Next may have type 'invalid' for the k and/or - // v components, so copying the map key or value may cause - // miswiring if the key has >1 components. In the worst case, - // this causes a crash. The test below used to report that - // pts(v) includes not just command-line-arguments.g but new(float64) too, which - // is ill-typed. - - // sizeof(K) > 1, abstractly - type K struct{ a, b, c, d *float64 } - k := K{new(float64), nil, nil, nil} - m := map[K]*int{k: &g} - - for _, v := range m { - print(v) // @pointsto command-line-arguments.g - } -} - -var v float64 - -func maps4() { - // Regression test for generating constraints for cases of key and values - // being blank identifiers or different types assignable from the - // corresponding map types in a range stmt. 
- type K struct{ a *float64 } - k := K{&v} - m := map[K]*int{k: &g} - - for x, y := range m { - print(x.a) // @pointsto command-line-arguments.v - print(y) // @pointsto command-line-arguments.g - } - var i struct{ a *float64 } - for i, _ = range m { - print(i.a) // @pointsto command-line-arguments.v - } - var j interface{} - for _, j = range m { - // TODO support the statement `print(j.(*int))` - print(j) // @pointsto command-line-arguments.g - } - for _, _ = range m { - } - // do something after 'for _, _ =' to exercise the - // effects of indexing - for _, j = range m { - // TODO support the statement `print(j.(*int))` - print(j) // @pointsto command-line-arguments.g - } -} - -func main() { - maps1() - maps2() - maps3() - maps4() -} diff --git a/go/pointer/testdata/panic.go b/go/pointer/testdata/panic.go deleted file mode 100644 index 3377d836d4f..00000000000 --- a/go/pointer/testdata/panic.go +++ /dev/null @@ -1,37 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -// Test of value flow from panic() to recover(). -// We model them as stores/loads of a global location. -// We ignore concrete panic types originating from the runtime. - -var someval int - -type myPanic struct{} - -func f(int) {} - -func g() string { return "" } - -func deadcode() { - panic(123) // not reached -} - -func main() { - switch someval { - case 0: - panic("oops") - case 1: - panic(myPanic{}) - case 2: - panic(f) - case 3: - panic(g) - } - ex := recover() - print(ex) // @types myPanic | string | func(int) | func() string - print(ex.(func(int))) // @pointsto command-line-arguments.f - print(ex.(func() string)) // @pointsto command-line-arguments.g -} diff --git a/go/pointer/testdata/recur.go b/go/pointer/testdata/recur.go deleted file mode 100644 index 06567633025..00000000000 --- a/go/pointer/testdata/recur.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -// Analysis abstraction of recursive calls is finite. - -func main() { - main() -} - -// @calls command-line-arguments.main -> command-line-arguments.main diff --git a/go/pointer/testdata/reflect.go b/go/pointer/testdata/reflect.go deleted file mode 100644 index cf3195a6a20..00000000000 --- a/go/pointer/testdata/reflect.go +++ /dev/null @@ -1,118 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -import ( - "reflect" - "unsafe" -) - -var a, b int -var unknown bool - -func reflectIndirect() { - ptr := &a - // Pointer: - print(reflect.Indirect(reflect.ValueOf(&ptr)).Interface().(*int)) // @pointsto command-line-arguments.a - // Non-pointer: - print(reflect.Indirect(reflect.ValueOf([]*int{ptr})).Interface().([]*int)[0]) // @pointsto command-line-arguments.a -} - -func reflectNewAt() { - var x [8]byte - print(reflect.NewAt(reflect.TypeOf(3), unsafe.Pointer(&x)).Interface()) // @types *int -} - -// @warning "unsound: command-line-arguments.reflectNewAt contains a reflect.NewAt.. call" - -func reflectTypeOf() { - t := reflect.TypeOf(3) - if unknown { - t = reflect.TypeOf("foo") - } - // TODO(adonovan): make types.Eval let us refer to unexported types. 
- print(t) // #@types *reflect.rtype - print(reflect.Zero(t).Interface()) // @types int | string - newint := reflect.New(t).Interface() // @line rtonew - print(newint) // @types *int | *string - print(newint.(*int)) // @pointsto - print(newint.(*string)) // @pointsto -} - -func reflectTypeElem() { - print(reflect.Zero(reflect.TypeOf(&a).Elem()).Interface()) // @types int - print(reflect.Zero(reflect.TypeOf([]string{}).Elem()).Interface()) // @types string - print(reflect.Zero(reflect.TypeOf(make(chan bool)).Elem()).Interface()) // @types bool - print(reflect.Zero(reflect.TypeOf(make(map[string]float64)).Elem()).Interface()) // @types float64 - print(reflect.Zero(reflect.TypeOf([3]complex64{}).Elem()).Interface()) // @types complex64 - print(reflect.Zero(reflect.TypeOf(3).Elem()).Interface()) // @types - print(reflect.Zero(reflect.TypeOf(new(interface{})).Elem())) // @types interface{} - print(reflect.Zero(reflect.TypeOf(new(interface{})).Elem()).Interface()) // @types -} - -// reflect.Values within reflect.Values. -func metareflection() { - // "box" a *int twice, unbox it twice. - v0 := reflect.ValueOf(&a) - print(v0) // @types *int - v1 := reflect.ValueOf(v0) // box - print(v1) // @types reflect.Value - v2 := reflect.ValueOf(v1) // box - print(v2) // @types reflect.Value - v1a := v2.Interface().(reflect.Value) // unbox - print(v1a) // @types reflect.Value - v0a := v1a.Interface().(reflect.Value) // unbox - print(v0a) // @types *int - print(v0a.Interface().(*int)) // @pointsto command-line-arguments.a - - // "box" an interface{} lvalue twice, unbox it twice. - var iface interface{} = 3 - x0 := reflect.ValueOf(&iface).Elem() - print(x0) // @types interface{} - x1 := reflect.ValueOf(x0) // box - print(x1) // @types reflect.Value - x2 := reflect.ValueOf(x1) // box - print(x2) // @types reflect.Value - x1a := x2.Interface().(reflect.Value) // unbox - print(x1a) // @types reflect.Value - x0a := x1a.Interface().(reflect.Value) // unbox - print(x0a) // @types interface{} - print(x0a.Interface()) // @types int -} - -type T struct{} - -// When the output of a type constructor flows to its input, we must -// bound the set of types created to ensure termination of the algorithm. -func typeCycle() { - t := reflect.TypeOf(0) - u := reflect.TypeOf("") - v := reflect.TypeOf(T{}) - for unknown { - t = reflect.PtrTo(t) - t = reflect.SliceOf(t) - - u = reflect.SliceOf(u) - - if unknown { - v = reflect.ChanOf(reflect.BothDir, v) - } else { - v = reflect.PtrTo(v) - } - } - - // Type height is bounded to about 4 map/slice/chan/pointer constructors. - print(reflect.Zero(t).Interface()) // @types int | []*int | []*[]*int - print(reflect.Zero(u).Interface()) // @types string | []string | [][]string | [][][]string | [][][][]string - print(reflect.Zero(v).Interface()) // @types T | *T | **T | ***T | ****T | chan T | *chan T | **chan T | chan *T | *chan *T | chan **T | chan ***T | chan chan T | chan *chan T | chan chan *T -} - -func main() { - reflectIndirect() - reflectNewAt() - reflectTypeOf() - reflectTypeElem() - metareflection() - typeCycle() -} diff --git a/go/pointer/testdata/rtti.go b/go/pointer/testdata/rtti.go deleted file mode 100644 index 05b4a88c1f8..00000000000 --- a/go/pointer/testdata/rtti.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -// Regression test for guru crash -// https://code.google.com/p/go/issues/detail?id=6605 -// -// Using reflection, methods may be called on types that are not the -// operand of any ssa.MakeInterface instruction. 
In this example, -// (Y).F is called by deriving the type Y from *Y. Prior to the fix, -// no RTTI (or method set) for type Y was included in the program, so -// the F() call would crash. - -import "reflect" - -var a int - -type X struct{} - -func (X) F() *int { - return &a -} - -type I interface { - F() *int -} - -func main() { - type Y struct{ X } - print(reflect.Indirect(reflect.ValueOf(new(Y))).Interface().(I).F()) // @pointsto command-line-arguments.a -} diff --git a/go/pointer/testdata/structreflect.go b/go/pointer/testdata/structreflect.go deleted file mode 100644 index 9fb49f5590e..00000000000 --- a/go/pointer/testdata/structreflect.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build ignore - -package main - -import "reflect" - -type A struct { - f *int - g interface{} - h bool -} - -var dyn string - -func reflectTypeFieldByName() { - f, _ := reflect.TypeOf(A{}).FieldByName("f") - print(f.Type) // @pointsto *int - - g, _ := reflect.TypeOf(A{}).FieldByName("g") - print(g.Type) // @pointsto interface{} - print(reflect.Zero(g.Type)) // @pointsto - print(reflect.Zero(g.Type)) // @types interface{} - - print(reflect.Zero(g.Type).Interface()) // @pointsto - print(reflect.Zero(g.Type).Interface()) // @types - - h, _ := reflect.TypeOf(A{}).FieldByName("h") - print(h.Type) // @pointsto bool - - missing, _ := reflect.TypeOf(A{}).FieldByName("missing") - print(missing.Type) // @pointsto - - dyn, _ := reflect.TypeOf(A{}).FieldByName(dyn) - print(dyn.Type) // @pointsto *int | bool | interface{} -} - -func reflectTypeField() { - fld := reflect.TypeOf(A{}).Field(0) - print(fld.Type) // @pointsto *int | bool | interface{} -} - -func main() { - reflectTypeFieldByName() - reflectTypeField() -} diff --git a/go/pointer/testdata/structs.go b/go/pointer/testdata/structs.go deleted file mode 100644 index 085439e0466..00000000000 --- a/go/pointer/testdata/structs.go +++ /dev/null @@ -1,101 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -var unknown bool // defeat dead-code elimination - -var p, q int - -type A struct { - f *int - g interface{} -} - -func (a A) m1() { - print(a.f) // @pointsto command-line-arguments.p -} - -func (a *A) m2() { - print(a) // @pointsto complit.A@struct1s:9 - print(a.f) // @pointsto command-line-arguments.p -} - -type B struct { - h *int - A -} - -func structs1() { - b := &B{ // @line struct1s - h: &q, - } - b.f = &p - b.g = b - - print(b.h) // @pointsto command-line-arguments.q - print(b.f) // @pointsto command-line-arguments.p - print(b.g) // @types *B - - ptr := &b.f - print(*ptr) // @pointsto command-line-arguments.p - - b.m1() - b.m2() -} - -// @calls command-line-arguments.structs1 -> (command-line-arguments.A).m1 -// @calls command-line-arguments.structs1 -> (*command-line-arguments.A).m2 -// @calls (*command-line-arguments.B).m1 -> (command-line-arguments.A).m1 -// @calls (*command-line-arguments.B).m2 -> (*command-line-arguments.A).m2 - -type T struct { - x int - y int -} - -type S struct { - a [3]T - b *[3]T - c [3]*T -} - -func structs2() { - var s S // @line s2s - print(&s) // @pointsto s@s2s:6 - print(&s.a) // @pointsto s.a@s2s:6 - print(&s.a[0]) // @pointsto s.a[*]@s2s:6 - print(&s.a[0].x) // @pointsto s.a[*].x@s2s:6 - print(&s.a[0].y) // @pointsto s.a[*].y@s2s:6 - print(&s.b) // @pointsto s.b@s2s:6 - print(&s.b[0]) // @pointsto - print(&s.b[0].x) // @pointsto - print(&s.b[0].y) // @pointsto - print(&s.c) // @pointsto s.c@s2s:6 - print(&s.c[0]) // @pointsto s.c[*]@s2s:6 - print(&s.c[0].x) // @pointsto - print(&s.c[0].y) // @pointsto - - var s2 S // @line 
s2s2 - s2.b = new([3]T) // @line s2s2b - print(s2.b) // @pointsto new@s2s2b:12 - print(&s2.b) // @pointsto s2.b@s2s2:6 - print(&s2.b[0]) // @pointsto new[*]@s2s2b:12 - print(&s2.b[0].x) // @pointsto new[*].x@s2s2b:12 - print(&s2.b[0].y) // @pointsto new[*].y@s2s2b:12 - print(&s2.c[0].x) // @pointsto - print(&s2.c[0].y) // @pointsto - - var s3 S // @line s2s3 - s3.c[2] = new(T) // @line s2s3c - print(&s3.c) // @pointsto s3.c@s2s3:6 - print(s3.c[1]) // @pointsto new@s2s3c:15 - print(&s3.c[1]) // @pointsto s3.c[*]@s2s3:6 - print(&s3.c[1].x) // @pointsto new.x@s2s3c:15 - print(&s3.c[1].y) // @pointsto new.y@s2s3c:15 -} - -func main() { - structs1() - structs2() -} diff --git a/go/pointer/testdata/timer.go b/go/pointer/testdata/timer.go deleted file mode 100644 index 465d0813a18..00000000000 --- a/go/pointer/testdata/timer.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build ignore - -package main - -import "time" - -func after() {} - -func main() { - // @calls time.startTimer -> time.sendTime - ticker := time.NewTicker(1) - <-ticker.C - - // @calls time.startTimer -> time.sendTime - timer := time.NewTimer(time.Second) - <-timer.C - - // @calls time.startTimer -> time.goFunc - // @calls time.goFunc -> main.after - timer = time.AfterFunc(time.Second, after) - <-timer.C -} - -// @calls time.sendTime -> time.Now diff --git a/go/pointer/testdata/typeparams.go b/go/pointer/testdata/typeparams.go deleted file mode 100644 index 461ba443768..00000000000 --- a/go/pointer/testdata/typeparams.go +++ /dev/null @@ -1,68 +0,0 @@ -//go:build ignore -// +build ignore - -package main - -import ( - "fmt" - "os" -) - -type S[T any] struct{ t T } - -var theSint S[int] -var theSbool S[bool] - -func (s *S[T]) String() string { - print(s) // @pointsto command-line-arguments.theSbool | command-line-arguments.theSint - return "" -} - -func Type[T any]() { - var x *T - print(x) // @types *int | *bool -} - -func Caller[T any]() { - var s *S[T] - _ = s.String() -} - -var a int -var b bool - -type t[T any] struct { - a *map[string]chan *T -} - -func fn[T any](a *T) { - m := make(map[string]chan *T) - m[""] = make(chan *T, 1) - m[""] <- a - x := []t[T]{t[T]{a: &m}} - print(x) // @pointstoquery <-(*x[i].a)[key] command-line-arguments.a | command-line-arguments.b -} - -func main() { - // os.Args is considered intrinsically allocated, - // but may also be set explicitly (e.g. on Windows), hence '...'. - print(os.Args) // @pointsto | ... - fmt.Println("Hello!", &theSint) - fmt.Println("World!", &theSbool) - - Type[int]() // call - f := Type[bool] // call through a variable - _ = Type[string] // not called so will not appear in Type's print. - f() - - Caller[int]() - Caller[bool]() - - fn(&a) - fn(&b) -} - -// @calls (*fmt.pp).handleMethods -> (*command-line-arguments.S[int]).String[int] -// @calls (*fmt.pp).handleMethods -> (*command-line-arguments.S[bool]).String[bool] -// @calls command-line-arguments.Caller[int] -> (*command-line-arguments.S[int]).String[int] -// @calls command-line-arguments.Caller[bool] -> (*command-line-arguments.S[bool]).String[bool] diff --git a/go/pointer/util.go b/go/pointer/util.go deleted file mode 100644 index 17728aa06ac..00000000000 --- a/go/pointer/util.go +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pointer - -import ( - "bytes" - "fmt" - "go/types" - "log" - "os" - "runtime" - "time" - - exec "golang.org/x/sys/execabs" - - "golang.org/x/tools/container/intsets" -) - -// CanPoint reports whether the type T is pointerlike, -// for the purposes of this analysis. -func CanPoint(T types.Type) bool { - switch T := T.(type) { - case *types.Named: - if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" { - return true // treat reflect.Value like interface{} - } - return CanPoint(T.Underlying()) - case *types.Pointer, *types.Interface, *types.Map, *types.Chan, *types.Signature, *types.Slice: - return true - } - - return false // array struct tuple builtin basic -} - -// CanHaveDynamicTypes reports whether the type T can "hold" dynamic types, -// i.e. is an interface (incl. reflect.Type) or a reflect.Value. -func CanHaveDynamicTypes(T types.Type) bool { - switch T := T.(type) { - case *types.Named: - if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" { - return true // reflect.Value - } - return CanHaveDynamicTypes(T.Underlying()) - case *types.Interface: - return true - } - return false -} - -func isInterface(T types.Type) bool { return types.IsInterface(T) } - -// mustDeref returns the element type of its argument, which must be a -// pointer; panic ensues otherwise. -func mustDeref(typ types.Type) types.Type { - return typ.Underlying().(*types.Pointer).Elem() -} - -// deref returns a pointer's element type; otherwise it returns typ. -func deref(typ types.Type) types.Type { - if p, ok := typ.Underlying().(*types.Pointer); ok { - return p.Elem() - } - return typ -} - -// A fieldInfo describes one subelement (node) of the flattening-out -// of a type T: the subelement's type and its path from the root of T. -// -// For example, for this type: -// -// type line struct{ points []struct{x, y int} } -// -// flatten() of the inner struct yields the following []fieldInfo: -// -// struct{ x, y int } "" -// int ".x" -// int ".y" -// -// and flatten(line) yields: -// -// struct{ points []struct{x, y int} } "" -// struct{ x, y int } ".points[*]" -// int ".points[*].x -// int ".points[*].y" -type fieldInfo struct { - typ types.Type - - // op and tail describe the path to the element (e.g. ".a#2.b[*].c"). - op interface{} // *Array: true; *Tuple: int; *Struct: *types.Var; *Named: nil - tail *fieldInfo -} - -// path returns a user-friendly string describing the subelement path. -func (fi *fieldInfo) path() string { - var buf bytes.Buffer - for p := fi; p != nil; p = p.tail { - switch op := p.op.(type) { - case bool: - fmt.Fprintf(&buf, "[*]") - case int: - fmt.Fprintf(&buf, "#%d", op) - case *types.Var: - fmt.Fprintf(&buf, ".%s", op.Name()) - } - } - return buf.String() -} - -// flatten returns a list of directly contained fields in the preorder -// traversal of the type tree of t. The resulting elements are all -// scalars (basic types or pointerlike types), except for struct/array -// "identity" nodes, whose type is that of the aggregate. -// -// reflect.Value is considered pointerlike, similar to interface{}. -// -// Callers must not mutate the result. -func (a *analysis) flatten(t types.Type) []*fieldInfo { - fl, ok := a.flattenMemo[t] - if !ok { - switch t := t.(type) { - case *types.Named: - u := t.Underlying() - if isInterface(u) { - // Debuggability hack: don't remove - // the named type from interfaces as - // they're very verbose. 
- fl = append(fl, &fieldInfo{typ: t}) // t may be a type param - } else { - fl = a.flatten(u) - } - - case *types.Basic, - *types.Signature, - *types.Chan, - *types.Map, - *types.Interface, - *types.Slice, - *types.Pointer: - fl = append(fl, &fieldInfo{typ: t}) - - case *types.Array: - fl = append(fl, &fieldInfo{typ: t}) // identity node - for _, fi := range a.flatten(t.Elem()) { - fl = append(fl, &fieldInfo{typ: fi.typ, op: true, tail: fi}) - } - - case *types.Struct: - fl = append(fl, &fieldInfo{typ: t}) // identity node - for i, n := 0, t.NumFields(); i < n; i++ { - f := t.Field(i) - for _, fi := range a.flatten(f.Type()) { - fl = append(fl, &fieldInfo{typ: fi.typ, op: f, tail: fi}) - } - } - - case *types.Tuple: - // No identity node: tuples are never address-taken. - n := t.Len() - if n == 1 { - // Don't add a fieldInfo link for singletons, - // e.g. in params/results. - fl = append(fl, a.flatten(t.At(0).Type())...) - } else { - for i := 0; i < n; i++ { - f := t.At(i) - for _, fi := range a.flatten(f.Type()) { - fl = append(fl, &fieldInfo{typ: fi.typ, op: i, tail: fi}) - } - } - } - - default: - panic(fmt.Sprintf("cannot flatten unsupported type %T", t)) - } - - a.flattenMemo[t] = fl - } - - return fl -} - -// sizeof returns the number of pointerlike abstractions (nodes) in the type t. -func (a *analysis) sizeof(t types.Type) uint32 { - return uint32(len(a.flatten(t))) -} - -// shouldTrack reports whether object type T contains (recursively) -// any fields whose addresses should be tracked. -func (a *analysis) shouldTrack(T types.Type) bool { - if a.track == trackAll { - return true // fast path - } - track, ok := a.trackTypes[T] - if !ok { - a.trackTypes[T] = true // break cycles conservatively - // NB: reflect.Value, reflect.Type are pre-populated to true. - for _, fi := range a.flatten(T) { - switch ft := fi.typ.Underlying().(type) { - case *types.Interface, *types.Signature: - track = true // needed for callgraph - case *types.Basic: - // no-op - case *types.Chan: - track = a.track&trackChan != 0 || a.shouldTrack(ft.Elem()) - case *types.Map: - track = a.track&trackMap != 0 || a.shouldTrack(ft.Key()) || a.shouldTrack(ft.Elem()) - case *types.Slice: - track = a.track&trackSlice != 0 || a.shouldTrack(ft.Elem()) - case *types.Pointer: - track = a.track&trackPtr != 0 || a.shouldTrack(ft.Elem()) - case *types.Array, *types.Struct: - // No need to look at field types since they will follow (flattened). - default: - // Includes *types.Tuple, which are never address-taken. - panic(ft) - } - if track { - break - } - } - a.trackTypes[T] = track - if !track && a.log != nil { - fmt.Fprintf(a.log, "\ttype not tracked: %s\n", T) - } - } - return track -} - -// offsetOf returns the (abstract) offset of field index within struct -// or tuple typ. -func (a *analysis) offsetOf(typ types.Type, index int) uint32 { - var offset uint32 - switch t := typ.Underlying().(type) { - case *types.Tuple: - for i := 0; i < index; i++ { - offset += a.sizeof(t.At(i).Type()) - } - case *types.Struct: - offset++ // the node for the struct itself - for i := 0; i < index; i++ { - offset += a.sizeof(t.Field(i).Type()) - } - default: - panic(fmt.Sprintf("offsetOf(%s : %T)", typ, typ)) - } - return offset -} - -// sliceToArray returns the type representing the arrays to which -// slice type slice points. 
-func sliceToArray(slice types.Type) *types.Array { - return types.NewArray(slice.Underlying().(*types.Slice).Elem(), 1) -} - -// Node set ------------------------------------------------------------------- - -type nodeset struct { - intsets.Sparse -} - -func (ns *nodeset) String() string { - var buf bytes.Buffer - buf.WriteRune('{') - var space [50]int - for i, n := range ns.AppendTo(space[:0]) { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteRune('n') - fmt.Fprintf(&buf, "%d", n) - } - buf.WriteRune('}') - return buf.String() -} - -func (ns *nodeset) add(n nodeid) bool { - return ns.Sparse.Insert(int(n)) -} - -func (ns *nodeset) addAll(y *nodeset) bool { - return ns.UnionWith(&y.Sparse) -} - -// Profiling & debugging ------------------------------------------------------- - -var timers = make(map[string]time.Time) - -func start(name string) { - if debugTimers { - timers[name] = time.Now() - log.Printf("%s...\n", name) - } -} - -func stop(name string) { - if debugTimers { - log.Printf("%s took %s\n", name, time.Since(timers[name])) - } -} - -// diff runs the command "diff a b" and reports its success. -func diff(a, b string) bool { - var cmd *exec.Cmd - switch runtime.GOOS { - case "plan9": - cmd = exec.Command("/bin/diff", "-c", a, b) - default: - cmd = exec.Command("/usr/bin/diff", "-u", a, b) - } - cmd.Stdout = os.Stderr - cmd.Stderr = os.Stderr - return cmd.Run() == nil -} From 1943c1e3e102b1183f51034225010c48f70a0964 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 30 May 2023 21:10:18 -0400 Subject: [PATCH 088/109] internal/diff: fix LineEdits bug in fast path The fast-path "optimization" that skips the main algorithm when the input is already line-aligned failed to check that the replacement text consisted of complete lines. (Scare quotes because removing the "optimization" causes tests to fail. See CL 499377 next in stack for why.) Thanks to pjw for diagnosing the root cause and providing the test case in CL 498975. Fixes golang/go#60379 Change-Id: I2ff92de4550754691442362b8a8932ee42971461 Reviewed-on: https://go-review.googlesource.com/c/tools/+/499376 gopls-CI: kokoro Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Peter Weinberger --- internal/diff/diff.go | 6 ++++-- internal/diff/diff_test.go | 8 ++++++++ internal/diff/difftest/difftest.go | 17 +++++++++++++++++ 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/internal/diff/diff.go b/internal/diff/diff.go index 602f1e7cd15..a181d674b70 100644 --- a/internal/diff/diff.go +++ b/internal/diff/diff.go @@ -114,13 +114,15 @@ func lineEdits(src string, edits []Edit) ([]Edit, error) { return nil, err } - // Do all edits begin and end at the start of a line? + // Do all deletions begin and end at the start of a line, + // and all insertions end with a newline? // TODO(adonovan, pjw): why does omitting this 'optimization' // cause tests to fail? 
(TestDiff/insert-line,extra_newline) for _, edit := range edits { if edit.Start >= len(src) || // insertion at EOF edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start - edit.End > 0 && src[edit.End-1] != '\n' { // not at line start + edit.End > 0 && src[edit.End-1] != '\n' || // not at line start + edit.New != "" && edit.New[len(edit.New)-1] != '\n' { // partial insert goto expand } } diff --git a/internal/diff/diff_test.go b/internal/diff/diff_test.go index b6881c1f2f0..9d014495fdf 100644 --- a/internal/diff/diff_test.go +++ b/internal/diff/diff_test.go @@ -107,6 +107,14 @@ func TestLineEdits(t *testing.T) { if !reflect.DeepEqual(got, edits) { t.Errorf("LineEdits got\n%q, want\n%q\n%#v", got, edits, tc) } + // make sure that applying the edits gives the expected result + fixed, err := diff.Apply(tc.In, got) + if err != nil { + t.Error(err) + } + if fixed != tc.Out { + t.Errorf("Apply(LineEdits): got %q, want %q", fixed, tc.Out) + } }) } } diff --git a/internal/diff/difftest/difftest.go b/internal/diff/difftest/difftest.go index 9b00590f67d..a999f8951ad 100644 --- a/internal/diff/difftest/difftest.go +++ b/internal/diff/difftest/difftest.go @@ -279,6 +279,23 @@ var TestCases = []struct { Edits: []diff.Edit{{Start: 3, End: 3, New: "\nbbb"}}, LineEdits: []diff.Edit{{Start: 0, End: 4, New: "aaa\nbbb\n"}}, Unified: UnifiedPrefix + "@@ -1,2 +1,3 @@\n aaa\n+bbb\n ccc\n", + }, { + Name: "60379", + In: `package a + +type S struct { +s fmt.Stringer +} +`, + Out: `package a + +type S struct { + s fmt.Stringer +} +`, + Edits: []diff.Edit{{Start: 27, End: 27, New: "\t"}}, + LineEdits: []diff.Edit{{Start: 27, End: 42, New: "\ts fmt.Stringer\n"}}, + Unified: UnifiedPrefix + "@@ -1,5 +1,5 @@\n package a\n \n type S struct {\n-s fmt.Stringer\n+\ts fmt.Stringer\n }\n", }, } From 0b4461babc215b6f789c75cf0086558c1f4ff4e9 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 30 May 2023 22:54:01 -0400 Subject: [PATCH 089/109] internal/diff: fix LineEdits bug in slow path Previously, the expandEdit operation would expand to the end of the line unconditionally, but this caused it to gulp an extra line if it was already line-aligned. This change causes it to do the expansion only if the end is not line-aligned, or the replacement text doesn't end with a newline. Now, removing the fast path no longer causes tests to fail. This also allows us to remove the logic added in CL 489695 to work around issue golang/go#59232. 
Fixes golang/go#60379 Fixes golang/go#59232 Change-Id: Ia40e4e3bb714d75acb95103a38e8c49a8ef456de Reviewed-on: https://go-review.googlesource.com/c/tools/+/499377 Run-TryBot: Alan Donovan gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Peter Weinberger --- internal/diff/diff.go | 21 +++++++++++++-------- internal/diff/diff_test.go | 12 ++++++------ internal/diff/difftest/difftest.go | 8 ++++---- internal/diff/unified.go | 13 ++----------- 4 files changed, 25 insertions(+), 29 deletions(-) diff --git a/internal/diff/diff.go b/internal/diff/diff.go index a181d674b70..19de1b28e94 100644 --- a/internal/diff/diff.go +++ b/internal/diff/diff.go @@ -18,7 +18,7 @@ type Edit struct { } func (e Edit) String() string { - return fmt.Sprintf("{Start:%d,End:%d,New:%s}", e.Start, e.End, e.New) + return fmt.Sprintf("{Start:%d,End:%d,New:%q}", e.Start, e.End, e.New) } // Apply applies a sequence of edits to the src buffer and returns the @@ -116,19 +116,21 @@ func lineEdits(src string, edits []Edit) ([]Edit, error) { // Do all deletions begin and end at the start of a line, // and all insertions end with a newline? - // TODO(adonovan, pjw): why does omitting this 'optimization' - // cause tests to fail? (TestDiff/insert-line,extra_newline) + // (This is merely a fast path.) for _, edit := range edits { if edit.Start >= len(src) || // insertion at EOF edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start edit.End > 0 && src[edit.End-1] != '\n' || // not at line start edit.New != "" && edit.New[len(edit.New)-1] != '\n' { // partial insert - goto expand + goto expand // slow path } } return edits, nil // aligned expand: + if len(edits) == 0 { + return edits, nil // no edits (unreachable due to fast path) + } expanded := make([]Edit, 0, len(edits)) // a guess prev := edits[0] // TODO(adonovan): opt: start from the first misaligned edit. @@ -160,10 +162,13 @@ func expandEdit(edit Edit, src string) Edit { // Expand end right to end of line. 
end := edit.End - if nl := strings.IndexByte(src[end:], '\n'); nl < 0 { - edit.End = len(src) // extend to EOF - } else { - edit.End = end + nl + 1 // extend beyond \n + if end > 0 && src[end-1] != '\n' || + edit.New != "" && edit.New[len(edit.New)-1] != '\n' { + if nl := strings.IndexByte(src[end:], '\n'); nl < 0 { + edit.End = len(src) // extend to EOF + } else { + edit.End = end + nl + 1 // extend beyond \n + } } edit.New += src[end:edit.End] diff --git a/internal/diff/diff_test.go b/internal/diff/diff_test.go index 9d014495fdf..7b25c3af5c3 100644 --- a/internal/diff/diff_test.go +++ b/internal/diff/diff_test.go @@ -95,17 +95,17 @@ func FuzzRoundTrip(f *testing.F) { func TestLineEdits(t *testing.T) { for _, tc := range difftest.TestCases { t.Run(tc.Name, func(t *testing.T) { - // if line edits not specified, it is the same as edits - edits := tc.LineEdits - if edits == nil { - edits = tc.Edits + want := tc.LineEdits + if want == nil { + want = tc.Edits // already line-aligned } got, err := diff.LineEdits(tc.In, tc.Edits) if err != nil { t.Fatalf("LineEdits: %v", err) } - if !reflect.DeepEqual(got, edits) { - t.Errorf("LineEdits got\n%q, want\n%q\n%#v", got, edits, tc) + if !reflect.DeepEqual(got, want) { + t.Errorf("in=<<%s>>\nout=<<%s>>\nraw edits=%s\nline edits=%s\nwant: %s", + tc.In, tc.Out, tc.Edits, got, want) } // make sure that applying the edits gives the expected result fixed, err := diff.Apply(tc.In, got) diff --git a/internal/diff/difftest/difftest.go b/internal/diff/difftest/difftest.go index a999f8951ad..fb691edc386 100644 --- a/internal/diff/difftest/difftest.go +++ b/internal/diff/difftest/difftest.go @@ -28,7 +28,7 @@ const ( var TestCases = []struct { Name, In, Out, Unified string - Edits, LineEdits []diff.Edit + Edits, LineEdits []diff.Edit // expectation (LineEdits=nil => already line-aligned) NoDiff bool }{{ Name: "empty", @@ -220,9 +220,9 @@ var TestCases = []struct { {Start: 14, End: 14, New: "C\n"}, }, LineEdits: []diff.Edit{ - {Start: 0, End: 6, New: "C\n"}, - {Start: 6, End: 8, New: "B\nA\n"}, - {Start: 10, End: 14, New: "A\n"}, + {Start: 0, End: 4, New: ""}, + {Start: 6, End: 6, New: "B\n"}, + {Start: 10, End: 12, New: ""}, {Start: 14, End: 14, New: "C\n"}, }, }, { diff --git a/internal/diff/unified.go b/internal/diff/unified.go index 3522e1e5b18..1308503f70c 100644 --- a/internal/diff/unified.go +++ b/internal/diff/unified.go @@ -155,18 +155,9 @@ func toUnified(fromName, toName string, content string, edits []Edit) (unified, last++ } if edit.New != "" { - for i, content := range splitLines(edit.New) { - toLine++ - // Merge identical Delete+Insert. - // This is an unwanted output of converting diffs to line diffs - // that is easiest to fix by postprocessing. - // e.g. issue #59232: ("aaa\nccc\n", "aaa\nbbb\nccc") - // -> [Delete "aaa\n", Insert "aaa\n", Insert "bbb\n", ...]. - if i == 0 && last > start && h.lines[len(h.lines)-1].content == content { - h.lines[len(h.lines)-1].kind = opEqual - continue - } + for _, content := range splitLines(edit.New) { h.lines = append(h.lines, line{kind: opInsert, content: content}) + toLine++ } } } From c35c44fa0fb6c46c71605623eaef24689a333a4a Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 1 Jun 2023 16:34:44 -0400 Subject: [PATCH 090/109] gopls/internal/lsp/cache: add assertions This change documents and asserts a few invariants that should be maintained by the code. The crash in golang/go#60551 shows that they are not, but I still can't see the mistake in my proof. 
Updates golang/go#60551 Change-Id: I833f7575f1d7372837ab5d7ba5988c94650ce07f Reviewed-on: https://go-review.googlesource.com/c/tools/+/500055 Auto-Submit: Alan Donovan Run-TryBot: Alan Donovan gopls-CI: kokoro Reviewed-by: Robert Findley TryBot-Result: Gopher Robot --- gopls/internal/lsp/cache/analysis.go | 42 ++++++++++++++++------------ 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/gopls/internal/lsp/cache/analysis.go b/gopls/internal/lsp/cache/analysis.go index 4679041b24f..04e2651f9e6 100644 --- a/gopls/internal/lsp/cache/analysis.go +++ b/gopls/internal/lsp/cache/analysis.go @@ -23,7 +23,6 @@ import ( "sort" "strings" "sync" - "time" "golang.org/x/sync/errgroup" "golang.org/x/tools/go/analysis" @@ -32,6 +31,7 @@ import ( "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/gopls/internal/lsp/source" "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/tag" "golang.org/x/tools/internal/facts" "golang.org/x/tools/internal/gcimporter" "golang.org/x/tools/internal/memoize" @@ -164,8 +164,6 @@ import ( // Destroy() // - share cache.{goVersionRx,parseGoImpl} -var born = time.Now() - // Analyze applies a set of analyzers to the package denoted by id, // and returns their diagnostics for that package. // @@ -174,9 +172,8 @@ var born = time.Now() // Precondition: all analyzers within the process have distinct names. // (The names are relied on by the serialization logic.) func (s *snapshot) Analyze(ctx context.Context, id PackageID, analyzers []*source.Analyzer) ([]*source.Diagnostic, error) { - if false { // debugging - log.Println("Analyze@", time.Since(born)) // called after the 7s IWL in k8s - } + ctx, done := event.Start(ctx, "snapshot.Analyze", tag.Package.Of(string(id))) + defer done() // Filter and sort enabled root analyzers. // A disabled analyzer may still be run if required by another. @@ -199,19 +196,13 @@ func (s *snapshot) Analyze(ctx context.Context, id PackageID, analyzers []*sourc } } - if false { // debugging - // TODO(adonovan): use proper tracing. - t0 := time.Now() - defer func() { - log.Printf("%v for analyze(%s, %s)", time.Since(t0), id, enabled) - }() - } - // Run the analysis. res, err := s.analyze(ctx, id, enabled) if err != nil { return nil, err } + // Inv: res is the successful result of analyzeImpl(analyzers, id), + // which augments the successful result of actuallyAnalyze. // Report diagnostics only from enabled actions that succeeded. // Errors from creating or analyzing packages are ignored. @@ -225,7 +216,11 @@ func (s *snapshot) Analyze(ctx context.Context, id PackageID, analyzers []*sourc // results, we should propagate the per-action errors. var results []*source.Diagnostic for _, a := range enabled { - summary := res.Actions[a.Name] + summary, ok := res.Actions[a.Name] + if summary == nil { + panic(fmt.Sprintf("analyzeSummary.Actions[%q] = (nil, %t); got %v (#60551)", + a.Name, ok, res.Actions)) + } if summary.Err != "" { continue // action failed } @@ -309,7 +304,7 @@ type actionSummary struct { // analyze is a memoization of analyzeImpl. func (s *snapshot) analyze(ctx context.Context, id PackageID, analyzers []*analysis.Analyzer) (*analyzeSummary, error) { - // Use the sorted list of names of analyzers in the key. + // Use the caller-sorted list of names of analyzers in the key. 
// // TODO(adonovan): opt: account for analysis results at a // finer grain to avoid duplicate work when a @@ -568,6 +563,9 @@ func analysisCacheKey(analyzers []*analysis.Analyzer, m *source.Metadata, compil // actuallyAnalyze implements the cache-miss case. // This function does not access the snapshot. +// +// Postcondition: on success, the analyzeSummary.Actions +// key set is {a.Name for a in analyzers}. func actuallyAnalyze(ctx context.Context, analyzers []*analysis.Analyzer, m *source.Metadata, vdeps map[PackageID]*analyzeSummary, compiledGoFiles []source.FileHandle) (*analyzeSummary, error) { // Create a local FileSet for processing this package only. fset := token.NewFileSet() @@ -637,6 +635,7 @@ func actuallyAnalyze(ctx context.Context, analyzers []*analysis.Analyzer, m *sou // Execute the graph in parallel. execActions(roots) + // Inv: each root's summary is set (whether success or error). // Don't return (or cache) the result in case of cancellation. if err := ctx.Err(); err != nil { @@ -645,8 +644,11 @@ func actuallyAnalyze(ctx context.Context, analyzers []*analysis.Analyzer, m *sou // Return summaries only for the requested actions. summaries := make(map[string]*actionSummary) - for _, act := range roots { - summaries[act.a.Name] = act.summary + for _, root := range roots { + if root.summary == nil { + panic("root has nil action.summary (#60551)") + } + summaries[root.a.Name] = root.summary } return &analyzeSummary{ @@ -897,6 +899,7 @@ func (act *action) String() string { } // execActions executes a set of action graph nodes in parallel. +// Postcondition: each action.summary is set, even in case of error. func execActions(actions []*action) { var wg sync.WaitGroup for _, act := range actions { @@ -917,6 +920,9 @@ func execActions(actions []*action) { } } }) + if act.summary == nil { + panic("nil action.summary (#60551)") + } }() } wg.Wait() From 947adca5120c2d1a71f8e49ad3e1e1e77f51f7eb Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Wed, 31 May 2023 22:10:59 -0400 Subject: [PATCH 091/109] gopls/internal/lsp/source/methodsets: comparable also has no package Fix a crash where "error" is assumed to be the only named type with no package. 
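
As background (an illustrative aside, not code from this CL): like error,
comparable is predeclared in the Universe scope rather than in any package,
so its *types.TypeName reports a nil Pkg(). That is exactly the case the old
panic guard did not anticipate. A minimal, runnable check:

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		for _, name := range []string{"error", "comparable"} {
			tname := types.Universe.Lookup(name).(*types.TypeName)
			// Both predeclared names belong to no package (Pkg() == nil).
			fmt.Printf("%s: Pkg() == nil? %v\n", name, tname.Pkg() == nil)
		}
	}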
Fixes golang/go#60544 Change-Id: I911296936f93429c780761979aeba38936b14428 Reviewed-on: https://go-review.googlesource.com/c/tools/+/499196 Reviewed-by: Alan Donovan Run-TryBot: Robert Findley TryBot-Result: Gopher Robot gopls-CI: kokoro --- gopls/internal/lsp/source/methodsets/methodsets.go | 4 ++-- .../marker/testdata/diagnostics/issue60544.txt | 13 +++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 gopls/internal/regtest/marker/testdata/diagnostics/issue60544.txt diff --git a/gopls/internal/lsp/source/methodsets/methodsets.go b/gopls/internal/lsp/source/methodsets/methodsets.go index dac369badbb..56b8ce37ecf 100644 --- a/gopls/internal/lsp/source/methodsets/methodsets.go +++ b/gopls/internal/lsp/source/methodsets/methodsets.go @@ -351,8 +351,8 @@ func fingerprint(method *types.Func) (string, bool) { if tname.Pkg() != nil { buf.WriteString(strconv.Quote(tname.Pkg().Path())) buf.WriteByte('.') - } else if tname.Name() != "error" { - panic(tname) // error is the only named type with no package + } else if tname.Name() != "error" && tname.Name() != "comparable" { + panic(tname) // error and comparable the only named types with no package } buf.WriteString(tname.Name()) diff --git a/gopls/internal/regtest/marker/testdata/diagnostics/issue60544.txt b/gopls/internal/regtest/marker/testdata/diagnostics/issue60544.txt new file mode 100644 index 00000000000..b644d453164 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/diagnostics/issue60544.txt @@ -0,0 +1,13 @@ +This test exercises a crash due to treatment of "comparable" in methodset +calculation (golang/go#60544). + +-min_go is 1.19 as the error message changed at this Go version. +-- flags -- +-min_go=go1.19 + +-- main.go -- +package main + +type X struct{} + +func (X) test(x comparable) {} //@diag("comparable", re"outside a type constraint") From 04ceacbfbd2cfcd2975a071c1fa2728fd0495a17 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 1 Jun 2023 16:27:41 -0400 Subject: [PATCH 092/109] gopls/internal/lsp/source: fix panic in typeDefinition on comparable comparable is also permitted to have no position. Updates golang/go#60544 Change-Id: Ic0694796432ab8b3271a60e4f4f649a1657d462b Reviewed-on: https://go-review.googlesource.com/c/tools/+/499986 gopls-CI: kokoro Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Robert Findley --- gopls/internal/lsp/source/identifier.go | 4 ++-- gopls/internal/lsp/source/type_definition.go | 16 ++++++++------ .../internal/regtest/misc/definition_test.go | 22 +++++++++++++++++++ 3 files changed, 33 insertions(+), 9 deletions(-) diff --git a/gopls/internal/lsp/source/identifier.go b/gopls/internal/lsp/source/identifier.go index 15fe13a9436..57001af930b 100644 --- a/gopls/internal/lsp/source/identifier.go +++ b/gopls/internal/lsp/source/identifier.go @@ -56,7 +56,7 @@ func searchForEnclosing(info *types.Info, path []ast.Node) *types.TypeName { // typeToObject returns the relevant type name for the given type, after // unwrapping pointers, arrays, slices, channels, and function signatures with -// a single non-error result. +// a single non-error result, and ignoring built-in named types. func typeToObject(typ types.Type) *types.TypeName { switch typ := typ.(type) { case *types.Named: @@ -79,7 +79,7 @@ func typeToObject(typ types.Type) *types.TypeName { for i := 0; i < results.Len(); i++ { obj := typeToObject(results.At(i).Type()) if obj == nil || hasErrorType(obj) { - // Skip builtins. + // Skip builtins. 
TODO(rfindley): should comparable be handled here as well? continue } if res != nil { diff --git a/gopls/internal/lsp/source/type_definition.go b/gopls/internal/lsp/source/type_definition.go index 73fc9659983..6c26b1693f8 100644 --- a/gopls/internal/lsp/source/type_definition.go +++ b/gopls/internal/lsp/source/type_definition.go @@ -9,6 +9,7 @@ import ( "fmt" "go/token" + "golang.org/x/tools/gopls/internal/bug" "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/internal/event" ) @@ -35,19 +36,20 @@ func TypeDefinition(ctx context.Context, snapshot Snapshot, fh FileHandle, posit return nil, nil } - typObj := typeToObject(obj.Type()) - if typObj == nil { + tname := typeToObject(obj.Type()) + if tname == nil { return nil, fmt.Errorf("no type definition for %s", obj.Name()) } - // Identifiers with the type "error" are a special case with no position. - if hasErrorType(typObj) { - // TODO(rfindley): we can do better here, returning a link to the builtin - // file. + if !tname.Pos().IsValid() { + // The only defined types with no position are error and comparable. + if tname.Name() != "error" && tname.Name() != "comparable" { + bug.Reportf("unexpected type name with no position: %s", tname) + } return nil, nil } - loc, err := mapPosition(ctx, pkg.FileSet(), snapshot, typObj.Pos(), typObj.Pos()+token.Pos(len(typObj.Name()))) + loc, err := mapPosition(ctx, pkg.FileSet(), snapshot, tname.Pos(), tname.Pos()+token.Pos(len(tname.Name()))) if err != nil { return nil, err } diff --git a/gopls/internal/regtest/misc/definition_test.go b/gopls/internal/regtest/misc/definition_test.go index 9f24ef6d369..0a36336b567 100644 --- a/gopls/internal/regtest/misc/definition_test.go +++ b/gopls/internal/regtest/misc/definition_test.go @@ -371,6 +371,28 @@ func main() {} } } +func TestGoToTypeDefinition_Issue60544(t *testing.T) { + const mod = ` +-- go.mod -- +module mod.com + +go 1.19 +-- main.go -- +package main + +func F[T comparable]() {} +` + + Run(t, mod, func(t *testing.T, env *Env) { + env.OpenFile("main.go") + + _, err := env.Editor.GoToTypeDefinition(env.Ctx, env.RegexpSearch("main.go", "comparable")) // must not panic + if err != nil { + t.Fatal(err) + } + }) +} + // Test for golang/go#47825. func TestImportTestVariant(t *testing.T) { const mod = ` From 86c93e8732cce300d0270bce23117456ce92bb17 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 1 Jun 2023 06:19:57 -0400 Subject: [PATCH 093/109] gopls: unimported completion should use the completion matcher Fix a bug where unimported completion was using strings.HasPrefix directly, rather than using the configured completion matcher. 
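
An illustrative aside (not code from this CL): for the query "p" against the
candidate "Println", a case-sensitive prefix test rejects the match, while a
case-insensitive comparison, which is closer to how the configured matchers
behave, accepts it:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		candidate, query := "Println", "p"
		// The old check: drops the candidate because of case.
		fmt.Println(strings.HasPrefix(candidate, query)) // false
		// A case-insensitive comparison keeps it.
		fmt.Println(strings.HasPrefix(strings.ToLower(candidate), strings.ToLower(query))) // true
	}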
Fixes golang/go#60545 Change-Id: I96e8e0b2dbfd9f007b166d4a82399c591ffd823a Reviewed-on: https://go-review.googlesource.com/c/tools/+/499795 TryBot-Result: Gopher Robot Run-TryBot: Robert Findley Reviewed-by: Alan Donovan gopls-CI: kokoro --- .../lsp/source/completion/completion.go | 25 +++++++++++++------ .../marker/testdata/completion/issue60545.txt | 24 ++++++++++++++++++ 2 files changed, 42 insertions(+), 7 deletions(-) create mode 100644 gopls/internal/regtest/marker/testdata/completion/issue60545.txt diff --git a/gopls/internal/lsp/source/completion/completion.go b/gopls/internal/lsp/source/completion/completion.go index bc2b0c31bd7..de54a3f11f5 100644 --- a/gopls/internal/lsp/source/completion/completion.go +++ b/gopls/internal/lsp/source/completion/completion.go @@ -1210,8 +1210,10 @@ func (c *completer) selector(ctx context.Context, sel *ast.SelectorExpr) error { // quickParse does a quick parse of a single file of package m, // extracts exported package members and adds candidates to c.items. - var itemsMu sync.Mutex // guards c.items - var enough int32 // atomic bool + // TODO(rfindley): synchronizing access to c here does not feel right. + // Consider adding a concurrency-safe API for completer. + var cMu sync.Mutex // guards c.items and c.matcher + var enough int32 // atomic bool quickParse := func(uri span.URI, m *source.Metadata) error { if atomic.LoadInt32(&enough) != 0 { return nil @@ -1231,13 +1233,22 @@ func (c *completer) selector(ctx context.Context, sel *ast.SelectorExpr) error { return } - if !id.IsExported() || - sel.Sel.Name != "_" && !strings.HasPrefix(id.Name, sel.Sel.Name) { - return // not a match + if !id.IsExported() { + return + } + + cMu.Lock() + score := c.matcher.Score(id.Name) + cMu.Unlock() + + if sel.Sel.Name != "_" && score == 0 { + return // not a match; avoid constructing the completion item below } // The only detail is the kind and package: `var (from "example.com/foo")` // TODO(adonovan): pretty-print FuncDecl.FuncType or TypeSpec.Type? + // TODO(adonovan): should this score consider the actual c.matcher.Score + // of the item? How does this compare with the deepState.enqueue path? item := CompletionItem{ Label: id.Name, Detail: fmt.Sprintf("%s (from %q)", strings.ToLower(tok.String()), m.PkgPath), @@ -1298,12 +1309,12 @@ func (c *completer) selector(ctx context.Context, sel *ast.SelectorExpr) error { item.snippet = &sn } - itemsMu.Lock() + cMu.Lock() c.items = append(c.items, item) if len(c.items) >= unimportedMemberTarget { atomic.StoreInt32(&enough, 1) } - itemsMu.Unlock() + cMu.Unlock() }) return nil } diff --git a/gopls/internal/regtest/marker/testdata/completion/issue60545.txt b/gopls/internal/regtest/marker/testdata/completion/issue60545.txt new file mode 100644 index 00000000000..67221a67563 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/completion/issue60545.txt @@ -0,0 +1,24 @@ +This test checks that unimported completion is case-insensitive. + +-- go.mod -- +module mod.test + +go 1.18 + +-- main.go -- +package main + +func main() { + fmt.p //@complete(re"p()","Print", "Printf", "Println"), diag("fmt", re"(undefined|undeclared)") +} + +-- other.go -- +package main + +// Including another package that imports "fmt" causes completion to use the +// existing metadata, which is the codepath leading to golang/go#60545. 
+import "fmt" + +func _() { + fmt.Println() +} From 726c727df97f9144045163b8df42c855b4314612 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Fri, 26 May 2023 11:30:54 +0700 Subject: [PATCH 094/109] gopls/internal/lsp: enable min/max builtin completion test For golang/go#59488 Change-Id: I93680138c90750454b4d94af6dc84fe942c9dd34 Reviewed-on: https://go-review.googlesource.com/c/tools/+/498516 Run-TryBot: Cuong Manh Le Reviewed-by: Robert Findley Reviewed-by: Matthew Dempsky TryBot-Result: Gopher Robot Auto-Submit: Cuong Manh Le gopls-CI: kokoro --- gopls/internal/lsp/completion_test.go | 17 ++--------------- .../internal/lsp/testdata/builtins/builtins.go | 4 ++-- 2 files changed, 4 insertions(+), 17 deletions(-) diff --git a/gopls/internal/lsp/completion_test.go b/gopls/internal/lsp/completion_test.go index bef5e11e340..1fc7304fc43 100644 --- a/gopls/internal/lsp/completion_test.go +++ b/gopls/internal/lsp/completion_test.go @@ -25,8 +25,8 @@ func (r *runner) Completion(t *testing.T, src span.Span, test tests.Completion, opts.LiteralCompletions = strings.Contains(string(src.URI()), "literal") opts.ExperimentalPostfixCompletions = strings.Contains(string(src.URI()), "postfix") }) - got = filterSkipCompletionItems(tests.FilterBuiltins(src, got)) - want := filterSkipCompletionItems(expected(t, test, items)) + got = tests.FilterBuiltins(src, got) + want := expected(t, test, items) if diff := tests.DiffCompletionItems(want, got); diff != "" { t.Errorf("mismatching completion items (-want +got):\n%s", diff) } @@ -175,16 +175,3 @@ func (r *runner) callCompletion(t *testing.T, src span.Span, options func(*sourc } return list.Items } - -func filterSkipCompletionItems(items []protocol.CompletionItem) []protocol.CompletionItem { - n := 0 - for _, item := range items { - // TODO(cuonglm): remove once https://go-review.googlesource.com/c/go/+/498495 land. - if item.Label == "max" || item.Label == "min" { - continue - } - items[n] = item - n++ - } - return items[:n] -} diff --git a/gopls/internal/lsp/testdata/builtins/builtins.go b/gopls/internal/lsp/testdata/builtins/builtins.go index 47fa682e8d7..2e3361c7e6d 100644 --- a/gopls/internal/lsp/testdata/builtins/builtins.go +++ b/gopls/internal/lsp/testdata/builtins/builtins.go @@ -28,8 +28,8 @@ package builtins /* int8 */ //@item(int8, "int8", "", "type") /* iota */ //@item(iota, "iota", "", "const") /* len(v Type) int */ //@item(len, "len", "func(v Type) int", "func") -/* max(x Type, y ...Type) Type */ //@item(max, "max", "func(x Type, y ...Type) Type", "func") -/* min(y Type, y ...Type) Type */ //@item(min, "min", "func(y Type, y ...Type) Type", "func") +/* max(x T, y ...T) T */ //@item(max, "max", "func(x T, y ...T) T", "func") +/* min(y T, y ...T) T */ //@item(min, "min", "func(x T, y ...T) T", "func") /* make(t Type, size ...int) Type */ //@item(make, "make", "func(t Type, size ...int) Type", "func") /* new(Type) *Type */ //@item(new, "new", "func(Type) *Type", "func") /* nil */ //@item(_nil, "nil", "", "var") From 6f567c8090cb88f13a71b19595bf88c6b27dbeed Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Sat, 3 Jun 2023 15:34:18 -0400 Subject: [PATCH 095/109] gopls/internal/lsp/filecache: reduce lifespan of os.FileInfos These objects accounted for 30MB in a recent heap profile since they are large structs. This change selects just the two fields we need. 
Change-Id: I4b1ab713d82a73e851785c42784b1bfff75341c3 Reviewed-on: https://go-review.googlesource.com/c/tools/+/500635 Run-TryBot: Alan Donovan Reviewed-by: Robert Findley Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot --- gopls/internal/lsp/filecache/filecache.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/gopls/internal/lsp/filecache/filecache.go b/gopls/internal/lsp/filecache/filecache.go index df84693d24d..47ee89e41b0 100644 --- a/gopls/internal/lsp/filecache/filecache.go +++ b/gopls/internal/lsp/filecache/filecache.go @@ -427,8 +427,9 @@ func gc(goplsDir string) { for { // Enumerate all files in the cache. type item struct { - path string - stat os.FileInfo + path string + mtime time.Time + size int64 } var files []item start := time.Now() @@ -454,7 +455,7 @@ func gc(goplsDir string) { } os.Remove(path) // ignore error } else { - files = append(files, item{path, stat}) + files = append(files, item{path, stat.ModTime(), stat.Size()}) total += stat.Size() if debug && len(files)%1000 == 0 { log.Printf("filecache: checked %d files in %v", len(files), time.Since(start)) @@ -469,7 +470,7 @@ func gc(goplsDir string) { // Sort oldest files first. sort.Slice(files, func(i, j int) bool { - return files[i].stat.ModTime().Before(files[j].stat.ModTime()) + return files[i].mtime.Before(files[j].mtime) }) // Delete oldest files until we're under budget. @@ -479,13 +480,14 @@ func gc(goplsDir string) { break } if debug { - age := time.Since(file.stat.ModTime()) + age := time.Since(file.mtime) log.Printf("budget: deleting stale file %s (%dB, age %v)", - file.path, file.stat.Size(), age) + file.path, file.size, age) } os.Remove(file.path) // ignore error - total -= file.stat.Size() + total -= file.size } + files = nil // release memory before sleep time.Sleep(period) From a01290f9844baeb2bacb81f21640f46b78680918 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 6 Jun 2023 12:21:30 -0400 Subject: [PATCH 096/109] internal/typeparams: work around LookupFieldOrMethod inconsistency This change adds to x/tools a workaround for a bug in go/types that causes LookupFieldOrMethod and NewTypeSet to be inconsistent wrt an ill-typed method (*T).f where T itself is a pointer. The workaround is that, if Lookup fails, we walk the MethodSet. Updates golang/go#60634 Fixes golang/go#60628 Change-Id: I87caa2ae077e5cdfa40b65a2f52e261384c91167 Reviewed-on: https://go-review.googlesource.com/c/tools/+/501197 TryBot-Result: Gopher Robot Reviewed-by: Robert Findley Run-TryBot: Alan Donovan gopls-CI: kokoro --- internal/typeparams/common.go | 20 ++++++++++++ internal/typeparams/common_test.go | 50 ++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) diff --git a/internal/typeparams/common.go b/internal/typeparams/common.go index cfba8189f15..b9e87c691a3 100644 --- a/internal/typeparams/common.go +++ b/internal/typeparams/common.go @@ -105,6 +105,26 @@ func OriginMethod(fn *types.Func) *types.Func { } orig := NamedTypeOrigin(named) gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) + + // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: + // package p + // type T *int + // func (*T) f() {} + // LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}. + // Here we make them consistent by force. + // (The go/types bug is general, but this workaround is reached only + // for generic T thanks to the early return above.) 
+ if gfn == nil { + mset := types.NewMethodSet(types.NewPointer(orig)) + for i := 0; i < mset.Len(); i++ { + m := mset.At(i) + if m.Obj().Id() == fn.Id() { + gfn = m.Obj() + break + } + } + } + return gfn.(*types.Func) } diff --git a/internal/typeparams/common_test.go b/internal/typeparams/common_test.go index 68ef6c6f0e2..d1f13fa7f53 100644 --- a/internal/typeparams/common_test.go +++ b/internal/typeparams/common_test.go @@ -140,10 +140,12 @@ func TestOriginMethodUses(t *testing.T) { t.Fatal(err) } + // Look up func T.m. T := pkg.Scope().Lookup("T").Type() obj, _, _ := types.LookupFieldOrMethod(T, true, pkg, "m") m := obj.(*types.Func) + // Assert that the origin of each t.m() call is p.T.m. ast.Inspect(f, func(n ast.Node) bool { if call, ok := n.(*ast.CallExpr); ok { sel := call.Fun.(*ast.SelectorExpr) @@ -158,6 +160,54 @@ func TestOriginMethodUses(t *testing.T) { } } +// Issue #60628 was a crash in gopls caused by inconsistency (#60634) between +// LookupFieldOrMethod and NewFileSet for methods with an illegal +// *T receiver type, where T itself is a pointer. +// This is a regression test for the workaround in OriginMethod. +func TestOriginMethod60628(t *testing.T) { + const src = `package p; type T[P any] *int; func (r *T[A]) f() {}` + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "p.go", src, 0) + if err != nil { + t.Fatal(err) + } + + // Expect type error: "invalid receiver type T[A] (pointer or interface type)". + info := types.Info{ + Uses: make(map[*ast.Ident]types.Object), + } + var conf types.Config + pkg, _ := conf.Check("p", fset, []*ast.File{f}, &info) // error expected + if pkg == nil { + t.Fatal("no package") + } + + // Look up methodset of *T. + T := pkg.Scope().Lookup("T").Type() + mset := types.NewMethodSet(types.NewPointer(T)) + if mset.Len() == 0 { + t.Errorf("NewMethodSet(*T) is empty") + } + for i := 0; i < mset.Len(); i++ { + sel := mset.At(i) + m := sel.Obj().(*types.Func) + + // TODO(adonovan): check the consistency property required to fix #60634. + if false { + m2, _, _ := types.LookupFieldOrMethod(T, true, m.Pkg(), m.Name()) + if m2 != m { + t.Errorf("LookupFieldOrMethod(%v, indirect=true, %v) = %v, want %v", + T, m, m2, m) + } + } + + // Check the workaround. + if OriginMethod(m) == nil { + t.Errorf("OriginMethod(%v) = nil", m) + } + } +} + func TestGenericAssignableTo(t *testing.T) { testenv.NeedsGo1Point(t, 18) From db6a81ed14d3bfbf533dac957ec98a09b78b3f36 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Fri, 9 Jun 2023 17:12:07 -0400 Subject: [PATCH 097/109] go/packages/packagestest: set Config.Logf if the test is run verbosely The Config.Logf field enables debug logging in go/packages, which is exactly what we want when running a test in verbose mode to debug it. For golang/go#60650. 
Change-Id: I36b47e214860b5aec7c66042fc0ceb50c7062f1a Reviewed-on: https://go-review.googlesource.com/c/tools/+/502175 Reviewed-by: Michael Matloob TryBot-Bypass: Bryan Mills Run-TryBot: Bryan Mills Auto-Submit: Bryan Mills gopls-CI: kokoro --- go/packages/packagestest/export.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/go/packages/packagestest/export.go b/go/packages/packagestest/export.go index b687a44fb4f..16ded99ba6e 100644 --- a/go/packages/packagestest/export.go +++ b/go/packages/packagestest/export.go @@ -217,6 +217,9 @@ func Export(t testing.TB, exporter Exporter, modules []Module) *Exported { written: map[string]map[string]string{}, ExpectFileSet: token.NewFileSet(), } + if testing.Verbose() { + exported.Config.Logf = t.Logf + } defer func() { if t.Failed() || t.Skipped() { exported.Cleanup() From c59d87f5da3b83b2aa9eb7dcb2f2a95da0442f6d Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 12 Jun 2023 11:15:05 -0400 Subject: [PATCH 098/109] gopls/internal/lsp/cache: two minor simplifications While reading the code during talk prep, I noticed these two places where the code was unclear. Change-Id: I1c9d60a9abf78592422c165ef74b7d5414d5d400 Reviewed-on: https://go-review.googlesource.com/c/tools/+/502535 TryBot-Bypass: Alan Donovan Run-TryBot: Alan Donovan gopls-CI: kokoro Reviewed-by: Robert Findley Auto-Submit: Alan Donovan --- gopls/internal/lsp/cache/check.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/gopls/internal/lsp/cache/check.go b/gopls/internal/lsp/cache/check.go index 663127001e3..5f5400b9054 100644 --- a/gopls/internal/lsp/cache/check.go +++ b/gopls/internal/lsp/cache/check.go @@ -984,8 +984,7 @@ func (b *packageHandleBuilder) validatePackageHandle(prevPH, ph *packageHandle) // Opt: if no dep keys have changed, we need not re-evaluate the key. if prevPH != nil { - depsChanged := true - depsChanged = false + depsChanged := false assert(len(prevPH.depKeys) == len(ph.depKeys), "mismatching dep count") for id, newKey := range ph.depKeys { oldKey, ok := prevPH.depKeys[id] @@ -995,7 +994,6 @@ func (b *packageHandleBuilder) validatePackageHandle(prevPH, ph *packageHandle) break } } - if !depsChanged { return nil // key cannot have changed } @@ -1010,10 +1008,8 @@ func (b *packageHandleBuilder) validatePackageHandle(prevPH, ph *packageHandle) // A predecessor failed to build due to e.g. context cancellation. return fmt.Errorf("missing transitive refs for %s", dep.m.ID) } - for name, set := range trefs { - if token.IsExported(name) { - reachable.Union(set) - } + for _, set := range trefs { + reachable.Union(set) } } From 6e1595c15624167f91982fce2d4ac09c9fde5ff2 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 6 Jun 2023 21:28:46 -0400 Subject: [PATCH 099/109] internal/gcimporter: treat unknown constant values the same as invalid Fixes a crash resulting from trying to convert an unknown constant value. 
Fixes golang/go#60605 Change-Id: If6b831b8fe2f9690b9f89e191b329eb7660f5e14 Reviewed-on: https://go-review.googlesource.com/c/tools/+/501209 TryBot-Bypass: Robert Findley gopls-CI: kokoro Run-TryBot: Robert Findley Reviewed-by: Robert Griesemer --- .../testdata/diagnostics/issue60605.txt | 12 +++ internal/gcimporter/gcimporter_test.go | 101 ++++++++++-------- internal/gcimporter/iexport.go | 11 ++ 3 files changed, 81 insertions(+), 43 deletions(-) create mode 100644 gopls/internal/regtest/marker/testdata/diagnostics/issue60605.txt diff --git a/gopls/internal/regtest/marker/testdata/diagnostics/issue60605.txt b/gopls/internal/regtest/marker/testdata/diagnostics/issue60605.txt new file mode 100644 index 00000000000..f80857dcb99 --- /dev/null +++ b/gopls/internal/regtest/marker/testdata/diagnostics/issue60605.txt @@ -0,0 +1,12 @@ +This test verifies that we can export constants with unknown kind. +Previously, the exporter would panic while attempting to convert such constants +to their target type (float64, in this case). + +-- go.mod -- +module mod.txt/p + +go 1.20 +-- p.go -- +package p + +const EPSILON float64 = 1e- //@diag(re"1e-()", re"exponent has no digits") diff --git a/internal/gcimporter/gcimporter_test.go b/internal/gcimporter/gcimporter_test.go index 3d17e114d0d..e13cfd9219a 100644 --- a/internal/gcimporter/gcimporter_test.go +++ b/internal/gcimporter/gcimporter_test.go @@ -786,57 +786,72 @@ func TestIssue57015(t *testing.T) { } // This is a regression test for a failure to export a package -// containing a specific type error. +// containing type errors. // -// Though the issue and test are specific, they may be representatives -// of class of exporter bugs on ill-typed code that we have yet to -// flush out. +// Though the issues and tests are specific, they may be representatives of a +// class of exporter bugs on ill-typed code that we have yet to flush out. // // TODO(adonovan): systematize our search for similar problems using -// fuzz testing, and drive this test from a table of test cases -// discovered by fuzzing. -func TestIssue57729(t *testing.T) { - // The lack of a receiver causes Recv.Type=Invalid. - // (The type checker then treats Foo as a package-level - // function, inserting it into the package scope.) - // The exporter needs to apply the same treatment. - const src = `package p; func () Foo() {}` - - // Parse the ill-typed input. - fset := token.NewFileSet() - f, err := goparser.ParseFile(fset, "p.go", src, 0) - if err != nil { - t.Fatalf("parse: %v", err) - } +// fuzz testing. +func TestExportInvalid(t *testing.T) { - // Type check it, expecting errors. - config := &types.Config{ - Error: func(err error) { t.Log(err) }, // don't abort at first error - } - pkg1, _ := config.Check("p", fset, []*ast.File{f}, nil) + tests := []struct { + name string + src string + objName string + }{ + // The lack of a receiver causes Recv.Type=Invalid. + // (The type checker then treats Foo as a package-level + // function, inserting it into the package scope.) + // The exporter needs to apply the same treatment. + {"issue 57729", `package p; func () Foo() {}`, "Foo"}, - // Export it. - // (Shallowness isn't important here.) - data, err := IExportShallow(fset, pkg1) - if err != nil { - t.Fatalf("export: %v", err) // any failure to export is a bug + // It must be possible to export a constant with unknown kind, even if its + // type is known. + {"issue 60605", `package p; const EPSILON float64 = 1e-`, "EPSILON"}, } - // Re-import it. 
- imports := make(map[string]*types.Package) - insert := func(pkg1 *types.Package, name string) { panic("unexpected insert") } - pkg2, err := IImportShallow(fset, GetPackageFromMap(imports), data, "p", insert) - if err != nil { - t.Fatalf("import: %v", err) // any failure of IExport+IImport is a bug. - } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Parse the ill-typed input. + fset := token.NewFileSet() + + f, err := goparser.ParseFile(fset, "p.go", test.src, 0) + if f == nil { + // Some test cases may have parse errors, but we must always have a + // file. + t.Fatalf("ParseFile returned nil file. Err: %v", err) + } + + // Type check it, expecting errors. + config := &types.Config{ + Error: func(err error) { t.Log(err) }, // don't abort at first error + } + pkg1, _ := config.Check("p", fset, []*ast.File{f}, nil) + + // Export it. + // (Shallowness isn't important here.) + data, err := IExportShallow(fset, pkg1) + if err != nil { + t.Fatalf("export: %v", err) // any failure to export is a bug + } - // Check that Lookup("Foo") still returns something. - // We can't assert the type hasn't change: it has, - // from a method of Invalid to a standalone function. - hasObj1 := pkg1.Scope().Lookup("Foo") != nil - hasObj2 := pkg2.Scope().Lookup("Foo") != nil - if hasObj1 != hasObj2 { - t.Errorf("export+import changed Lookup('Foo')!=nil: was %t, became %t", hasObj1, hasObj2) + // Re-import it. + imports := make(map[string]*types.Package) + insert := func(pkg1 *types.Package, name string) { panic("unexpected insert") } + pkg2, err := IImportShallow(fset, GetPackageFromMap(imports), data, "p", insert) + if err != nil { + t.Fatalf("import: %v", err) // any failure of IExport+IImport is a bug. + } + + // Check that the expected object is present in both packages. + // We can't assert the type hasn't changed: it may have, in some cases. + hasObj1 := pkg1.Scope().Lookup(test.objName) != nil + hasObj2 := pkg2.Scope().Lookup(test.objName) != nil + if hasObj1 != hasObj2 { + t.Errorf("export+import changed Lookup(%q)!=nil: was %t, became %t", test.objName, hasObj1, hasObj2) + } + }) } } diff --git a/internal/gcimporter/iexport.go b/internal/gcimporter/iexport.go index 9930d8c36a7..3fc7989c083 100644 --- a/internal/gcimporter/iexport.go +++ b/internal/gcimporter/iexport.go @@ -913,6 +913,17 @@ func (w *exportWriter) value(typ types.Type, v constant.Value) { w.int64(int64(v.Kind())) } + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. 
+ // + // TODO(rfindley): we should switch on the recorded constant kind rather + // than the constant type + return + } + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { case types.IsBoolean: w.bool(constant.BoolVal(v)) From c43232f868439dcfd25d98501dafa5e695cd1d35 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 13 Jun 2023 11:25:28 -0400 Subject: [PATCH 100/109] cmd/digraph: improve examples using go list, mod Change-Id: Ib04b11b32f40f1602eb4cf0837e331b77511b6c5 Reviewed-on: https://go-review.googlesource.com/c/tools/+/502875 gopls-CI: kokoro Run-TryBot: Alan Donovan TryBot-Bypass: Alan Donovan Reviewed-by: Robert Findley --- cmd/digraph/digraph.go | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/cmd/digraph/digraph.go b/cmd/digraph/digraph.go index 0e50ad18dcb..93130a018ce 100644 --- a/cmd/digraph/digraph.go +++ b/cmd/digraph/digraph.go @@ -63,18 +63,27 @@ shirt -> sweater, not shirt -> tie -> sweater. Example usage: -Using digraph with existing Go tools: +Show which clothes (see above) must be donned before a jacket: - $ go mod graph | digraph nodes # Operate on the Go module graph. - $ go list -m all | digraph nodes # Operate on the Go package graph. + $ digraph reverse jacket -Show the transitive closure of imports of the digraph tool itself: +Many tools can be persuaded to produce output in digraph format, +as in the following examples. - $ go list -f '{{.ImportPath}} {{join .Imports " "}}' ... | digraph forward golang.org/x/tools/cmd/digraph +Using an import graph produced by go list, show a path that indicates +why the gopls application depends on the cmp package: -Show which clothes (see above) must be donned before a jacket: + $ go list -f '{{.ImportPath}} {{join .Imports " "}}' -deps golang.org/x/tools/gopls | + digraph somepath golang.org/x/tools/gopls github.com/google/go-cmp/cmp - $ digraph reverse jacket +Show which packages in x/tools depend, perhaps indirectly, on the callgraph package: + + $ go list -f '{{.ImportPath}} {{join .Imports " "}}' -deps golang.org/x/tools/... | + digraph reverse golang.org/x/tools/go/callgraph + +Using a module graph produced by go mod, show all dependencies of the current module: + + $ go mod graph | digraph forward $(go list -m) */ package main // import "golang.org/x/tools/cmd/digraph" From 85be8882c905a9b24abcec8363e48a14ad5cd4e2 Mon Sep 17 00:00:00 2001 From: Aaron Delaney Date: Thu, 1 Jun 2023 15:13:23 +0100 Subject: [PATCH 101/109] go/analysis/passes/defers: add analyser for defer mistake This is adding an analysis pass to catch defer statements where people intend to invoke a defer arguments when the defer is ran; not when it is first invoked. In order to achieve this, the current analyasis implementation first uses the inspect.Preorder tool to look for defer nodes. It then walks the defer node expression tree. This solution means that we don't catch function literals, and maybe it's slightly unoptimized because it doesn't use the Inspect fast node filtering once we find the defer nodes. Updates golang/go#60048. 
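To make the mistake concrete (an illustrative sketch mirroring the examples in doc.go and the testdata below, not code from this CL): the arguments of a deferred call are evaluated at the defer statement itself, so a bare time.Since measures nothing unless it is wrapped in a function literal.

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        start := time.Now()

        // Mistake the analyzer reports: time.Since(start) is evaluated now, at the
        // defer statement, so the printed latency is ~0 regardless of the work below.
        defer fmt.Println("latency (wrong):", time.Since(start))

        // Correct form: time.Since runs only when the deferred function runs.
        defer func() { fmt.Println("latency (right):", time.Since(start)) }()

        time.Sleep(50 * time.Millisecond) // the work being measured
    }
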
Change-Id: I50ec60c7fc4a5ced858f42cb8db8e9ea37a7038f Reviewed-on: https://go-review.googlesource.com/c/tools/+/499875 TryBot-Bypass: Alan Donovan Reviewed-by: Alan Donovan Reviewed-by: Robert Findley Auto-Submit: Alan Donovan gopls-CI: kokoro Run-TryBot: Alan Donovan --- go/analysis/passes/defers/cmd/defers/main.go | 13 ++++ go/analysis/passes/defers/defer.go | 60 +++++++++++++++++++ go/analysis/passes/defers/defer_test.go | 17 ++++++ go/analysis/passes/defers/doc.go | 25 ++++++++ go/analysis/passes/defers/testdata/src/a/a.go | 59 ++++++++++++++++++ 5 files changed, 174 insertions(+) create mode 100644 go/analysis/passes/defers/cmd/defers/main.go create mode 100644 go/analysis/passes/defers/defer.go create mode 100644 go/analysis/passes/defers/defer_test.go create mode 100644 go/analysis/passes/defers/doc.go create mode 100644 go/analysis/passes/defers/testdata/src/a/a.go diff --git a/go/analysis/passes/defers/cmd/defers/main.go b/go/analysis/passes/defers/cmd/defers/main.go new file mode 100644 index 00000000000..b3dc8b94eca --- /dev/null +++ b/go/analysis/passes/defers/cmd/defers/main.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The defers command runs the defers analyzer. +package main + +import ( + "golang.org/x/tools/go/analysis/passes/defers" + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { singlechecker.Main(defers.Analyzer) } diff --git a/go/analysis/passes/defers/defer.go b/go/analysis/passes/defers/defer.go new file mode 100644 index 00000000000..19474bcc4e8 --- /dev/null +++ b/go/analysis/passes/defers/defer.go @@ -0,0 +1,60 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package defers + +import ( + _ "embed" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +//go:embed doc.go +var doc string + +// Analyzer is the defer analyzer. +var Analyzer = &analysis.Analyzer{ + Name: "defer", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Doc: analysisutil.MustExtractDoc(doc, "defer"), + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + if !analysisutil.Imports(pass.Pkg, "time") { + return nil, nil + } + + checkDeferCall := func(node ast.Node) bool { + switch v := node.(type) { + case *ast.CallExpr: + fn, ok := typeutil.Callee(pass.TypesInfo, v).(*types.Func) + if ok && fn.Name() == "Since" && fn.Pkg().Path() == "time" { + pass.Reportf(v.Pos(), "call to time.Since is not deferred") + } + case *ast.FuncLit: + return false // prune + } + return true + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.DeferStmt)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + d := n.(*ast.DeferStmt) + ast.Inspect(d.Call, checkDeferCall) + }) + + return nil, nil +} diff --git a/go/analysis/passes/defers/defer_test.go b/go/analysis/passes/defers/defer_test.go new file mode 100644 index 00000000000..57881f022d4 --- /dev/null +++ b/go/analysis/passes/defers/defer_test.go @@ -0,0 +1,17 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package defers_test + +import ( + "testing" + + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/go/analysis/passes/defers" +) + +func Test(t *testing.T) { + testdata := analysistest.TestData() + analysistest.Run(t, testdata, defers.Analyzer, "a") +} diff --git a/go/analysis/passes/defers/doc.go b/go/analysis/passes/defers/doc.go new file mode 100644 index 00000000000..ec9f7664062 --- /dev/null +++ b/go/analysis/passes/defers/doc.go @@ -0,0 +1,25 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package defers defines an Analyzer that checks for common mistakes in defer +// statements. +// +// # Analyzer defer +// +// defer: report common mistakes in defer statements +// +// The defer analyzer reports a diagnostic when a defer statement would +// result in a non-deferred call to time.Since, as experience has shown +// that this is nearly always a mistake. +// +// For example: +// +// start := time.Now() +// ... +// defer recordLatency(time.Since(start)) // error: call to time.Since is not deferred +// +// The correct code is: +// +// defer func() { recordLatency(time.Since(start)) }()` +package defers diff --git a/go/analysis/passes/defers/testdata/src/a/a.go b/go/analysis/passes/defers/testdata/src/a/a.go new file mode 100644 index 00000000000..e8bc8cde3ba --- /dev/null +++ b/go/analysis/passes/defers/testdata/src/a/a.go @@ -0,0 +1,59 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +import ( + "fmt" + "time" +) + +func Since() (t time.Duration) { + return +} + +func x(time.Duration) {} +func x2(float64) {} + +func good() { + // The following are OK because func is not evaluated in defer invocation. + now := time.Now() + defer func() { + fmt.Println(time.Since(now)) // OK because time.Since is not evaluated in defer + }() + evalBefore := time.Since(now) + defer fmt.Println(evalBefore) + do := func(f func()) {} + defer do(func() { time.Since(now) }) + defer fmt.Println(Since()) // OK because Since function is not in module time + +} + +type y struct{} + +func (y) A(float64) {} +func (*y) B(float64) {} +func (y) C(time.Duration) {} +func (*y) D(time.Duration) {} + +func bad() { + var zero time.Time + now := time.Now() + defer time.Since(zero) // want "call to time.Since is not deferred" + defer time.Since(now) // want "call to time.Since is not deferred" + defer fmt.Println(time.Since(now)) // want "call to time.Since is not deferred" + defer fmt.Println(time.Since(time.Now())) // want "call to time.Since is not deferred" + defer x(time.Since(now)) // want "call to time.Since is not deferred" + defer x2(time.Since(now).Seconds()) // want "call to time.Since is not deferred" + defer y{}.A(time.Since(now).Seconds()) // want "call to time.Since is not deferred" + defer (&y{}).B(time.Since(now).Seconds()) // want "call to time.Since is not deferred" + defer y{}.C(time.Since(now)) // want "call to time.Since is not deferred" + defer (&y{}).D(time.Since(now)) // want "call to time.Since is not deferred" +} + +func ugly() { + // The following is ok even though time.Since is evaluated. We don't + // walk into function literals or check what function definitions are doing. 
+ defer x((func() time.Duration { return time.Since(time.Now()) })()) +} From 0245e1dfc6b09a30ad018bdcdadef4961067cfd4 Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Thu, 1 Jun 2023 15:02:12 -0400 Subject: [PATCH 102/109] gopls/internal/regtest/codelens: set GOWORK=off for go mod vendor We might be introducing vendoring for workspace mode. Set GOWORK=off when running go mod vendor in a single module context to make sure it has the right behavior and doesn't return an error. For golang/go#60056 Change-Id: I703d74d579aec6e4dad86ca092e3651e0b2e4eb0 Reviewed-on: https://go-review.googlesource.com/c/tools/+/499977 TryBot-Result: Gopher Robot Run-TryBot: Michael Matloob gopls-CI: kokoro Reviewed-by: Robert Findley --- gopls/internal/lsp/regtest/wrappers.go | 9 +++++++++ gopls/internal/regtest/codelens/codelens_test.go | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/gopls/internal/lsp/regtest/wrappers.go b/gopls/internal/lsp/regtest/wrappers.go index 163960fa407..5d5d2f778f4 100644 --- a/gopls/internal/lsp/regtest/wrappers.go +++ b/gopls/internal/lsp/regtest/wrappers.go @@ -270,6 +270,15 @@ func (e *Env) RunGoCommandInDir(dir, verb string, args ...string) { } } +// RunGoCommandInDirWithEnv is like RunGoCommand, but executes in the given +// relative directory of the sandbox with the given additional environment variables. +func (e *Env) RunGoCommandInDirWithEnv(dir string, env []string, verb string, args ...string) { + e.T.Helper() + if err := e.Sandbox.RunGoCommand(e.Ctx, dir, verb, args, env, true); err != nil { + e.T.Fatal(err) + } +} + // GoVersion checks the version of the go command. // It returns the X in Go 1.X. func (e *Env) GoVersion() int { diff --git a/gopls/internal/regtest/codelens/codelens_test.go b/gopls/internal/regtest/codelens/codelens_test.go index 68f2982cd19..8f718855f66 100644 --- a/gopls/internal/regtest/codelens/codelens_test.go +++ b/gopls/internal/regtest/codelens/codelens_test.go @@ -201,7 +201,7 @@ require golang.org/x/hello v1.2.3 t.Run(fmt.Sprintf("Upgrade individual dependency vendoring=%v", vendoring), func(t *testing.T) { WithOptions(ProxyFiles(proxyWithLatest)).Run(t, shouldUpdateDep, func(t *testing.T, env *Env) { if vendoring { - env.RunGoCommandInDir("a", "mod", "vendor") + env.RunGoCommandInDirWithEnv("a", []string{"GOWORK=off"}, "mod", "vendor") } env.AfterChange() env.OpenFile("a/go.mod") From c6c983054920f47ed9e5ba1b55a7a5934dd8bf53 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 13 Jun 2023 13:13:34 -0400 Subject: [PATCH 103/109] go/types/objectpath: memoize scope lookup in objectpath.Encoder Profiling revealed that scope lookup itself was costly, so memoize the objects themselves, not just their names. Also update BenchmarkCompletionFollowingEdit to allow it to be run on multiple repos, and add a test case for kubernetes. 
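For context, a typical client amortizes encoding by reusing one Encoder across many objects from the same package, which is the loop this memoization speeds up. The sketch below is illustrative usage, not code from this CL (printPaths and the package path are made up); per the package docs, the zero Encoder is ready to use.

    package main

    import (
        "fmt"
        "go/types"

        "golang.org/x/tools/go/types/objectpath"
    )

    // printPaths encodes a path for every package-level declaration, reusing a
    // single Encoder so per-scope work is done only once.
    func printPaths(pkg *types.Package) {
        var enc objectpath.Encoder // zero value is ready to use
        scope := pkg.Scope()
        for _, name := range scope.Names() {
            if path, err := enc.For(scope.Lookup(name)); err == nil {
                fmt.Printf("%s -> %s\n", name, path)
            }
        }
    }

    func main() {
        printPaths(types.NewPackage("example.com/p", "p")) // empty package: prints nothing
    }
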
For golang/go#53992 Change-Id: I17b1f39d8c356e8628610a544306686573a813a7 Reviewed-on: https://go-review.googlesource.com/c/tools/+/502976 gopls-CI: kokoro Run-TryBot: Robert Findley Reviewed-by: Alan Donovan TryBot-Bypass: Robert Findley --- go/types/objectpath/objectpath.go | 39 ++++---- .../internal/regtest/bench/completion_test.go | 88 +++++++++++++++---- 2 files changed, 90 insertions(+), 37 deletions(-) diff --git a/go/types/objectpath/objectpath.go b/go/types/objectpath/objectpath.go index 6cbb663b6b7..549aa9e54c0 100644 --- a/go/types/objectpath/objectpath.go +++ b/go/types/objectpath/objectpath.go @@ -121,8 +121,8 @@ func For(obj types.Object) (Path, error) { // An Encoder amortizes the cost of encoding the paths of multiple objects. // The zero value of an Encoder is ready to use. type Encoder struct { - scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() - namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects + namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() } // For returns the path to an object relative to its package, @@ -255,15 +255,14 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // the best paths because non-types may // refer to types, but not the reverse. empty := make([]byte, 0, 48) // initial space - names := enc.scopeNames(scope) - for _, name := range names { - o := scope.Lookup(name) + objs := enc.scopeObjects(scope) + for _, o := range objs { tname, ok := o.(*types.TypeName) if !ok { continue // handle non-types in second pass } - path := append(empty, name...) + path := append(empty, o.Name()...) path = append(path, opType) T := o.Type() @@ -289,9 +288,8 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Then inspect everything else: // non-types, and declared methods of defined types. - for _, name := range names { - o := scope.Lookup(name) - path := append(empty, name...) + for _, o := range objs { + path := append(empty, o.Name()...) if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) @@ -746,17 +744,22 @@ func (enc *Encoder) namedMethods(named *types.Named) []*types.Func { return methods } -// scopeNames is a memoization of scope.Names. Callers must not modify the result. -func (enc *Encoder) scopeNames(scope *types.Scope) []string { - m := enc.scopeNamesMemo +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. 
+func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo if m == nil { - m = make(map[*types.Scope][]string) - enc.scopeNamesMemo = m + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m } - names, ok := m[scope] + objs, ok := m[scope] if !ok { - names = scope.Names() // allocates and sorts - m[scope] = names + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs } - return names + return objs } diff --git a/gopls/internal/regtest/bench/completion_test.go b/gopls/internal/regtest/bench/completion_test.go index 4afc895314f..390d9935336 100644 --- a/gopls/internal/regtest/bench/completion_test.go +++ b/gopls/internal/regtest/bench/completion_test.go @@ -6,6 +6,7 @@ package bench import ( "fmt" + "sync/atomic" "testing" "golang.org/x/tools/gopls/internal/lsp/fake" @@ -145,31 +146,80 @@ func (c *completer) _() { // Edits force type-checked packages to be invalidated, so we want to measure // how long it takes before completion results are available. func BenchmarkCompletionFollowingEdit(b *testing.B) { - file := "internal/lsp/source/completion/completion2.go" - fileContent := ` + tests := []struct { + repo string + file string // repo-relative file to create + content string // file content + locationRegexp string // regexp for completion + }{ + { + "tools", + "internal/lsp/source/completion/completion2.go", + ` package completion func (c *completer) _() { c.inference.kindMatches(c.) - // __MAGIC_STRING_1 } -` - setup := func(env *Env) { - env.CreateBuffer(file, fileContent) +`, + `func \(c \*completer\) _\(\) {\n\tc\.inference\.kindMatches\((c)`, + }, + { + "kubernetes", + "pkg/kubelet/kubelet2.go", + ` +package kubelet + +func (kl *Kubelet) _() { + kl. +} +`, + `kl\.()`, + }, } - n := 1 - beforeCompletion := func(env *Env) { - old := fmt.Sprintf("__MAGIC_STRING_%d", n) - new := fmt.Sprintf("__MAGIC_STRING_%d", n+1) - n++ - env.RegexpReplace(file, old, new) - } + for _, test := range tests { + b.Run(test.repo, func(b *testing.B) { + repo := getRepo(b, test.repo) + _ = repo.sharedEnv(b) // ensure cache is warm + env := repo.newEnv(b, "completion."+test.repo, fake.EditorConfig{ + Settings: map[string]interface{}{ + "completeUnimported": false, + }, + }) + defer env.Close() + + env.CreateBuffer(test.file, "// __REGTEST_PLACEHOLDER_0__\n"+test.content) + editPlaceholder := func() { + edits := atomic.AddInt64(&editID, 1) + env.EditBuffer(test.file, protocol.TextEdit{ + Range: protocol.Range{ + Start: protocol.Position{Line: 0, Character: 0}, + End: protocol.Position{Line: 1, Character: 0}, + }, + // Increment the placeholder text, to ensure cache misses. + NewText: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", edits), + }) + } + env.AfterChange() - benchmarkCompletion(completionBenchOptions{ - file: file, - locationRegexp: `func \(c \*completer\) _\(\) {\n\tc\.inference\.kindMatches\((c)`, - setup: setup, - beforeCompletion: beforeCompletion, - }, b) + // Run a completion to make sure the system is warm. + loc := env.RegexpSearch(test.file, test.locationRegexp) + completions := env.Completion(loc) + + if testing.Verbose() { + fmt.Println("Results:") + for i := 0; i < len(completions.Items); i++ { + fmt.Printf("\t%d. 
%v\n", i, completions.Items[i]) + } + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + editPlaceholder() + loc := env.RegexpSearch(test.file, test.locationRegexp) + env.Completion(loc) + } + }) + } } From 27dbf85279a5e6e906a8c1b5cc5b73e8229a8efa Mon Sep 17 00:00:00 2001 From: Gopher Robot Date: Wed, 14 Jun 2023 15:46:54 +0000 Subject: [PATCH 104/109] go.mod: update golang.org/x dependencies Update golang.org/x dependencies to their latest tagged versions. Once this CL is submitted, and post-submit testing succeeds on all first-class ports across all supported Go versions, this repository will be tagged with its next minor version. Change-Id: Ic41189a4207d698a5b832352ca606473b3af0e1a Reviewed-on: https://go-review.googlesource.com/c/tools/+/503355 TryBot-Result: Gopher Robot Reviewed-by: Dmitri Shuralyov Reviewed-by: Dmitri Shuralyov Reviewed-by: Carlos Amedee Run-TryBot: Gopher Robot Auto-Submit: Gopher Robot --- go.mod | 8 ++++---- go.sum | 17 +++++++++++------ gopls/go.mod | 8 ++++---- gopls/go.sum | 17 +++++++++++------ 4 files changed, 30 insertions(+), 20 deletions(-) diff --git a/go.mod b/go.mod index 385b73a8b4c..6a8e4719268 100644 --- a/go.mod +++ b/go.mod @@ -4,9 +4,9 @@ go 1.18 // tagx:compat 1.16 require ( github.com/yuin/goldmark v1.4.13 - golang.org/x/mod v0.10.0 - golang.org/x/net v0.10.0 - golang.org/x/sys v0.8.0 + golang.org/x/mod v0.11.0 + golang.org/x/net v0.11.0 + golang.org/x/sys v0.9.0 ) -require golang.org/x/sync v0.2.0 +require golang.org/x/sync v0.3.0 diff --git a/go.sum b/go.sum index ab743b30aec..dc250710b23 100644 --- a/go.sum +++ b/go.sum @@ -2,38 +2,43 @@ github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= diff --git a/gopls/go.mod b/gopls/go.mod index 9869e25fd72..10c1407351d 100644 --- a/gopls/go.mod +++ b/gopls/go.mod @@ -7,10 +7,10 @@ require ( github.com/jba/printsrc v0.2.2 github.com/jba/templatecheck v0.6.0 github.com/sergi/go-diff v1.1.0 - golang.org/x/mod v0.10.0 - golang.org/x/sync v0.2.0 - golang.org/x/sys v0.8.0 - golang.org/x/text v0.9.0 + golang.org/x/mod v0.11.0 + golang.org/x/sync v0.3.0 + golang.org/x/sys v0.9.0 + golang.org/x/text v0.10.0 golang.org/x/tools v0.6.0 golang.org/x/vuln v0.0.0-20230110180137-6ad3e3d07815 gopkg.in/yaml.v3 v3.0.1 diff --git a/gopls/go.sum b/gopls/go.sum index f6308a79466..ef0c44020c8 100644 --- a/gopls/go.sum +++ b/gopls/go.sum @@ -44,6 +44,7 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= @@ -53,15 +54,16 @@ golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -69,13 +71,16 @@ golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0 
h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/vuln v0.0.0-20230110180137-6ad3e3d07815 h1:A9kONVi4+AnuOr1dopsibH6hLi1Huy54cbeJxnq4vmU= golang.org/x/vuln v0.0.0-20230110180137-6ad3e3d07815/go.mod h1:XJiVExZgoZfrrxoTeVsFYrSSk1snhfpOEC95JL+A4T0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From f394d451f85a030254df453bf84a450b228c4250 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 13 Jun 2023 13:50:51 -0400 Subject: [PATCH 105/109] gopls/internal/lsp/cache: compute xrefs and methodsets asynchronously No need to wait on xrefs or methodsets while performing latency-sensitive operations on packages. For golang/go#53992 Change-Id: I9ea65269a8c1e604fb99ed8b25e14db79f179576 Reviewed-on: https://go-review.googlesource.com/c/tools/+/503015 gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Robert Findley Reviewed-by: Alan Donovan --- gopls/internal/lsp/cache/check.go | 30 +++++++++++++--------------- gopls/internal/lsp/cache/pkg.go | 26 +++++++++++++++++++----- gopls/internal/lsp/cache/snapshot.go | 4 ++-- 3 files changed, 37 insertions(+), 23 deletions(-) diff --git a/gopls/internal/lsp/cache/check.go b/gopls/internal/lsp/cache/check.go index 5f5400b9054..6cfabe3dbcb 100644 --- a/gopls/internal/lsp/cache/check.go +++ b/gopls/internal/lsp/cache/check.go @@ -26,9 +26,7 @@ import ( "golang.org/x/tools/gopls/internal/lsp/filecache" "golang.org/x/tools/gopls/internal/lsp/protocol" "golang.org/x/tools/gopls/internal/lsp/source" - "golang.org/x/tools/gopls/internal/lsp/source/methodsets" "golang.org/x/tools/gopls/internal/lsp/source/typerefs" - "golang.org/x/tools/gopls/internal/lsp/source/xrefs" "golang.org/x/tools/gopls/internal/span" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/event/tag" @@ -44,6 +42,8 @@ const ( preserveImportGraph = true // hold on to the import graph for open packages ) +type unit = struct{} + // A typeCheckBatch holds data for a logical type-checking operation, which may // type-check many unrelated packages. // @@ -56,7 +56,7 @@ type typeCheckBatch struct { handles map[PackageID]*packageHandle parseCache *parseCache fset *token.FileSet // describes all parsed or imported files - cpulimit chan struct{} // concurrency limiter for CPU-bound operations + cpulimit chan unit // concurrency limiter for CPU-bound operations mu sync.Mutex syntaxPackages map[PackageID]*futurePackage // results of processing a requested package; may hold (nil, nil) @@ -69,7 +69,7 @@ type typeCheckBatch struct { // The goroutine that creates the futurePackage is responsible for evaluating // its value, and closing the done channel. type futurePackage struct { - done chan struct{} + done chan unit v pkgOrErr } @@ -154,7 +154,7 @@ func (s *snapshot) getImportGraph(ctx context.Context) *importGraph { // for the work to be done. 
done := s.importGraphDone if done == nil { - done = make(chan struct{}) + done = make(chan unit) s.importGraphDone = done release := s.Acquire() // must acquire to use the snapshot asynchronously go func() { @@ -360,7 +360,7 @@ func (s *snapshot) forEachPackageInternal(ctx context.Context, importGraph *impo handles: handles, fset: fileSetWithBase(reservedForParsing), syntaxIndex: make(map[PackageID]int), - cpulimit: make(chan struct{}, runtime.GOMAXPROCS(0)), + cpulimit: make(chan unit, runtime.GOMAXPROCS(0)), syntaxPackages: make(map[PackageID]*futurePackage), importPackages: make(map[PackageID]*futurePackage), } @@ -369,7 +369,7 @@ func (s *snapshot) forEachPackageInternal(ctx context.Context, importGraph *impo // Clone the file set every time, to ensure we do not leak files. b.fset = tokeninternal.CloneFileSet(importGraph.fset) // Pre-populate future cache with 'done' futures. - done := make(chan struct{}) + done := make(chan unit) close(done) for id, res := range importGraph.imports { b.importPackages[id] = &futurePackage{done, res} @@ -427,7 +427,7 @@ func (b *typeCheckBatch) getImportPackage(ctx context.Context, id PackageID) (pk } } - f = &futurePackage{done: make(chan struct{})} + f = &futurePackage{done: make(chan unit)} b.importPackages[id] = f b.mu.Unlock() @@ -479,7 +479,7 @@ func (b *typeCheckBatch) handleSyntaxPackage(ctx context.Context, i int, id Pack return f.v.pkg, f.v.err } - f = &futurePackage{done: make(chan struct{})} + f = &futurePackage{done: make(chan unit)} b.syntaxPackages[id] = f b.mu.Unlock() defer func() { @@ -508,7 +508,7 @@ func (b *typeCheckBatch) handleSyntaxPackage(ctx context.Context, i int, id Pack select { case <-ctx.Done(): return nil, ctx.Err() - case b.cpulimit <- struct{}{}: + case b.cpulimit <- unit{}: defer func() { <-b.cpulimit // release CPU token }() @@ -637,8 +637,8 @@ func (b *typeCheckBatch) checkPackage(ctx context.Context, ph *packageHandle) (* // Write package data to disk asynchronously. go func() { toCache := map[string][]byte{ - xrefsKind: pkg.xrefs, - methodSetsKind: pkg.methodsets.Encode(), + xrefsKind: pkg.xrefs(), + methodSetsKind: pkg.methodsets().Encode(), diagnosticsKind: encodeDiagnostics(pkg.diagnostics), } @@ -763,13 +763,13 @@ func (s *snapshot) getPackageHandles(ctx context.Context, ids []PackageID) (map[ // Collect all reachable IDs, and create done channels. // TODO: opt: modify SortPostOrder to make this pre-traversal unnecessary. 
var allIDs []PackageID - dones := make(map[PackageID]chan struct{}) + dones := make(map[PackageID]chan unit) var walk func(PackageID) walk = func(id PackageID) { if _, ok := dones[id]; ok { return } - dones[id] = make(chan struct{}) + dones[id] = make(chan unit) allIDs = append(allIDs, id) m := meta.metadata[id] for _, depID := range m.DepsByPkgPath { @@ -1262,8 +1262,6 @@ func typeCheckImpl(ctx context.Context, b *typeCheckBatch, inputs typeCheckInput if err != nil { return nil, err } - pkg.methodsets = methodsets.NewIndex(pkg.fset, pkg.types) - pkg.xrefs = xrefs.Index(pkg.compiledGoFiles, pkg.types, pkg.typesInfo) // Our heuristic for whether to show type checking errors is: // + If any file was 'fixed', don't show type checking errors as we diff --git a/gopls/internal/lsp/cache/pkg.go b/gopls/internal/lsp/cache/pkg.go index dbbb4be11f6..32f63495849 100644 --- a/gopls/internal/lsp/cache/pkg.go +++ b/gopls/internal/lsp/cache/pkg.go @@ -11,9 +11,11 @@ import ( "go/scanner" "go/token" "go/types" + "sync" "golang.org/x/tools/gopls/internal/lsp/source" "golang.org/x/tools/gopls/internal/lsp/source/methodsets" + "golang.org/x/tools/gopls/internal/lsp/source/xrefs" "golang.org/x/tools/gopls/internal/span" ) @@ -53,11 +55,25 @@ type syntaxPackage struct { importMap map[PackagePath]*types.Package hasFixedFiles bool // if true, AST was sufficiently mangled that we should hide type errors - // TODO(rfindley): opt: xrefs and methodsets do not need to be pinned to the - // package, and perhaps should be computed asynchronously to package - // diagnostics. - xrefs []byte - methodsets *methodsets.Index + xrefsOnce sync.Once + _xrefs []byte // only used by the xrefs method + + methodsetsOnce sync.Once + _methodsets *methodsets.Index // only used by the methodsets method +} + +func (p *syntaxPackage) xrefs() []byte { + p.xrefsOnce.Do(func() { + p._xrefs = xrefs.Index(p.compiledGoFiles, p.types, p.typesInfo) + }) + return p._xrefs +} + +func (p *syntaxPackage) methodsets() *methodsets.Index { + p.methodsetsOnce.Do(func() { + p._methodsets = methodsets.NewIndex(p.fset, p.types) + }) + return p._methodsets } func (p *Package) String() string { return string(p.ph.m.ID) } diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go index bff3dc17f63..6ff14fb6d34 100644 --- a/gopls/internal/lsp/cache/snapshot.go +++ b/gopls/internal/lsp/cache/snapshot.go @@ -688,7 +688,7 @@ func (s *snapshot) References(ctx context.Context, ids ...PackageID) ([]source.X return true } post := func(i int, pkg *Package) { - indexes[i] = XrefIndex{m: pkg.ph.m, data: pkg.pkg.xrefs} + indexes[i] = XrefIndex{m: pkg.ph.m, data: pkg.pkg.xrefs()} } return indexes, s.forEachPackage(ctx, ids, pre, post) } @@ -719,7 +719,7 @@ func (s *snapshot) MethodSets(ctx context.Context, ids ...PackageID) ([]*methods return true } post := func(i int, pkg *Package) { - indexes[i] = pkg.pkg.methodsets + indexes[i] = pkg.pkg.methodsets() } return indexes, s.forEachPackage(ctx, ids, pre, post) } From 3b62e7e25641fc2dabe915a738906d0779724bf1 Mon Sep 17 00:00:00 2001 From: Tim King Date: Fri, 26 May 2023 15:44:42 -0700 Subject: [PATCH 106/109] go/ssa: use core type within (*builder).receiver Change-Id: I677d99a2aeb6c7c8fa5362a371d781652d45aa35 Reviewed-on: https://go-review.googlesource.com/c/tools/+/498599 Reviewed-by: Alan Donovan gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Tim King --- go/ssa/builder.go | 9 ++++----- go/ssa/builder_generic_test.go | 20 ++++++++++++++++++++ 2 files changed, 24 insertions(+), 5 
deletions(-) diff --git a/go/ssa/builder.go b/go/ssa/builder.go index 8931fb46fc7..11b6423191f 100644 --- a/go/ssa/builder.go +++ b/go/ssa/builder.go @@ -829,7 +829,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { // The result is a "bound". obj := sel.obj.(*types.Func) rt := fn.typ(recvType(obj)) - _, wantAddr := deptr(rt) + _, wantAddr := deref(rt) escaping := true v := b.receiver(fn, e.X, wantAddr, escaping, sel) @@ -956,9 +956,8 @@ func (b *builder) stmtList(fn *Function, list []ast.Stmt) { // // escaping is defined as per builder.addr(). func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *selection) Value { - var v Value - if _, eptr := deptr(fn.typeOf(e)); wantAddr && !sel.indirect && !eptr { + if _, eptr := deref(fn.typeOf(e)); wantAddr && !sel.indirect && !eptr { v = b.addr(fn, e, escaping).address(fn) } else { v = b.expr(fn, e) @@ -967,7 +966,7 @@ func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, se last := len(sel.index) - 1 // The position of implicit selection is the position of the inducing receiver expression. v = emitImplicitSelections(fn, v, sel.index[:last], e.Pos()) - if _, vptr := deptr(v.Type()); !wantAddr && vptr { + if _, vptr := deref(v.Type()); !wantAddr && vptr { v = emitLoad(fn, v) } return v @@ -986,7 +985,7 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { obj := sel.obj.(*types.Func) recv := recvType(obj) - _, wantAddr := deptr(recv) + _, wantAddr := deref(recv) escaping := true v := b.receiver(fn, selector.X, wantAddr, escaping, sel) if types.IsInterface(recv) { diff --git a/go/ssa/builder_generic_test.go b/go/ssa/builder_generic_test.go index c86da0cc8e8..8ddf898efd9 100644 --- a/go/ssa/builder_generic_test.go +++ b/go/ssa/builder_generic_test.go @@ -690,6 +690,26 @@ func TestInstructionString(t *testing.T) { func f13[A [3]int, PA *A](v PA) { *v = A{7} } + + //@ instrs("f14", "*ssa.Call", "invoke t1.Set(0:int)") + func f14[T any, PT interface { + Set(int) + *T + }]() { + var t T + p := PT(&t) + p.Set(0) + } + + //@ instrs("f15", "*ssa.MakeClosure", "make closure (interface{Set(int); *T}).Set$bound [t1]") + func f15[T any, PT interface { + Set(int) + *T + }]() func(int) { + var t T + p := PT(&t) + return p.Set + } ` // Parse From ac2946029ad3806349fa00546449da9f59320e89 Mon Sep 17 00:00:00 2001 From: Tim King Date: Mon, 22 May 2023 13:46:35 -0700 Subject: [PATCH 107/109] go/ssa: fix bug in writeSignature on external functions Fixes a panic in writeSignature when fn.Params is non-empty while the function has a receiver. fn.Params is nil for non-Go source functions (synthetic or from object files). Change-Id: Iae3f7ce53fca05d1b154349c3b091aee015afa0b Reviewed-on: https://go-review.googlesource.com/c/tools/+/497155 Run-TryBot: Tim King Reviewed-by: Alan Donovan gopls-CI: kokoro TryBot-Result: Gopher Robot --- go/ssa/func.go | 10 +++++----- go/ssa/sanity.go | 3 +++ go/ssa/ssa.go | 4 ++-- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/go/ssa/func.go b/go/ssa/func.go index 60cf53f6003..38c3e31baff 100644 --- a/go/ssa/func.go +++ b/go/ssa/func.go @@ -516,15 +516,15 @@ func (f *Function) relMethod(from *types.Package, recv types.Type) string { } // writeSignature writes to buf the signature sig in declaration syntax. 
-func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) { +func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature) { buf.WriteString("func ") if recv := sig.Recv(); recv != nil { buf.WriteString("(") - if n := params[0].Name(); n != "" { - buf.WriteString(n) + if name := recv.Name(); name != "" { + buf.WriteString(name) buf.WriteString(" ") } - types.WriteType(buf, params[0].Type(), types.RelativeTo(from)) + types.WriteType(buf, recv.Type(), types.RelativeTo(from)) buf.WriteString(") ") } buf.WriteString(name) @@ -599,7 +599,7 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(mustDeref(l.Type()), from)) } } - writeSignature(buf, from, f.Name(), f.Signature, f.Params) + writeSignature(buf, from, f.Name(), f.Signature) buf.WriteString(":\n") if f.Blocks == nil { diff --git a/go/ssa/sanity.go b/go/ssa/sanity.go index 88ad374ded0..886be053251 100644 --- a/go/ssa/sanity.go +++ b/go/ssa/sanity.go @@ -8,6 +8,7 @@ package ssa // Currently it checks CFG invariants but little at the instruction level. import ( + "bytes" "fmt" "go/types" "io" @@ -412,8 +413,10 @@ func (s *sanity) checkFunction(fn *Function) bool { s.errorf("nil Prog") } + var buf bytes.Buffer _ = fn.String() // must not crash _ = fn.RelString(fn.relPkg()) // must not crash + WriteFunction(&buf, fn) // must not crash // All functions have a package, except delegates (which are // shared across packages, or duplicated as weak symbols in a diff --git a/go/ssa/ssa.go b/go/ssa/ssa.go index 313146d3351..bd42f2e0a90 100644 --- a/go/ssa/ssa.go +++ b/go/ssa/ssa.go @@ -258,8 +258,8 @@ type Node interface { // or method. // // If Blocks is nil, this indicates an external function for which no -// Go source code is available. In this case, FreeVars and Locals -// are nil too. Clients performing whole-program analysis must +// Go source code is available. In this case, FreeVars, Locals, and +// Params are nil too. Clients performing whole-program analysis must // handle external functions specially. // // Blocks contains the function's control-flow graph (CFG). From 41e4e565498859435a2ad44a71cf6701a6afb585 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Tue, 13 Jun 2023 15:43:44 -0400 Subject: [PATCH 108/109] gopls/internal/lsp/source/completion: ensuring completion completeness Ensure that completion processes at least depth=0 elements, by switching to a different model for truncating search. Don't encode the search deadline in the context, as the handling for context cancellation should differ from the handling of being out of budget. For example, we should not fail to format a completion item if we are out of budget. While at it, don't include type checking time in the completion budget, as it is highly variable and depends on the ordering of requests from the client: for example, if the client has already requested code lens, then the type-checked package will already exist and completion will not include type-checking in the budget. No documentation needs to be updated as the current documentation already says "this normally takes milliseconds", which can only be true if it doesn't include type checking. Also add a regression test that asserts we find all struct fields in completion results. 
Fixes golang/go#53992 Change-Id: I1aeb749cf64052b6a444166638a78b9945964e84 Reviewed-on: https://go-review.googlesource.com/c/tools/+/503016 Auto-Submit: Robert Findley Run-TryBot: Robert Findley Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot --- gopls/internal/lsp/cache/check.go | 2 +- .../lsp/source/completion/completion.go | 46 ++++++------- .../lsp/source/completion/deep_completion.go | 8 ++- .../internal/lsp/source/typerefs/refs_test.go | 2 +- .../regtest/completion/completion_test.go | 65 ++++++++++++++++++- 5 files changed, 94 insertions(+), 29 deletions(-) diff --git a/gopls/internal/lsp/cache/check.go b/gopls/internal/lsp/cache/check.go index 6cfabe3dbcb..59b19e03bab 100644 --- a/gopls/internal/lsp/cache/check.go +++ b/gopls/internal/lsp/cache/check.go @@ -748,7 +748,7 @@ func (ph *packageHandle) clone(validated bool) *packageHandle { } // getPackageHandles gets package handles for all given ids and their -// dependencies. +// dependencies, recursively. func (s *snapshot) getPackageHandles(ctx context.Context, ids []PackageID) (map[PackageID]*packageHandle, error) { s.mu.Lock() meta := s.meta diff --git a/gopls/internal/lsp/source/completion/completion.go b/gopls/internal/lsp/source/completion/completion.go index de54a3f11f5..45c92d66f20 100644 --- a/gopls/internal/lsp/source/completion/completion.go +++ b/gopls/internal/lsp/source/completion/completion.go @@ -232,10 +232,6 @@ type completer struct { // mapper converts the positions in the file from which the completion originated. mapper *protocol.Mapper - // startTime is when we started processing this completion request. It does - // not include any time the request spent in the queue. - startTime time.Time - // scopes contains all scopes defined by nodes in our path, // including nil values for nodes that don't defined a scope. It // also includes our package scope and the universal scope at the @@ -445,8 +441,6 @@ func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHan ctx, done := event.Start(ctx, "completion.Completion") defer done() - startTime := time.Now() - pkg, pgf, err := source.NarrowestPackageForFile(ctx, snapshot, fh.URI()) if err != nil || pgf.File.Package == token.NoPos { // If we can't parse this file or find position for the package @@ -555,22 +549,30 @@ func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHan matcher: prefixMatcher(""), methodSetCache: make(map[methodSetKey]*types.MethodSet), mapper: pgf.Mapper, - startTime: startTime, scopes: scopes, } - var cancel context.CancelFunc - if c.opts.budget == 0 { - ctx, cancel = context.WithCancel(ctx) - } else { - // timeoutDuration is the completion budget remaining. If less than - // 10ms, set to 10ms - timeoutDuration := time.Until(c.startTime.Add(c.opts.budget)) - if timeoutDuration < 10*time.Millisecond { - timeoutDuration = 10 * time.Millisecond - } - ctx, cancel = context.WithTimeout(ctx, timeoutDuration) + ctx, cancel := context.WithCancel(ctx) + + // Compute the deadline for this operation. Deadline is relative to the + // search operation, not the entire completion RPC, as the work up until this + // point depends significantly on how long it took to type-check, which in + // turn depends on the timing of the request relative to other operations on + // the snapshot. Including that work in the budget leads to inconsistent + // results (and realistically, if type-checking took 200ms already, the user + // is unlikely to be significantly more bothered by e.g. another 100ms of + // search). 
+ // + // Don't overload the context with this deadline, as we don't want to + // conflate user cancellation (=fail the operation) with our time limit + // (=stop searching and succeed with partial results). + start := time.Now() + var deadline *time.Time + if c.opts.budget > 0 { + d := start.Add(c.opts.budget) + deadline = &d } + defer cancel() if surrounding := c.containingIdent(pgf.Src); surrounding != nil { @@ -585,7 +587,7 @@ func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHan } // Deep search collected candidates and their members for more candidates. - c.deepSearch(ctx) + c.deepSearch(ctx, start, deadline) for _, callback := range c.completionCallbacks { if err := c.snapshot.RunProcessEnvFunc(ctx, callback); err != nil { @@ -595,7 +597,7 @@ func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHan // Search candidates populated by expensive operations like // unimportedMembers etc. for more completion items. - c.deepSearch(ctx) + c.deepSearch(ctx, start, deadline) // Statement candidates offer an entire statement in certain contexts, as // opposed to a single object. Add statement candidates last because they @@ -614,7 +616,7 @@ func (c *completer) collectCompletions(ctx context.Context) error { if !(importSpec.Path.Pos() <= c.pos && c.pos <= importSpec.Path.End()) { continue } - return c.populateImportCompletions(ctx, importSpec) + return c.populateImportCompletions(importSpec) } // Inside comments, offer completions for the name of the relevant symbol. @@ -767,7 +769,7 @@ func (c *completer) emptySwitchStmt() bool { // Completions for "golang.org/" yield its subdirectories // (i.e. "golang.org/x/"). The user is meant to accept completion suggestions // until they reach a complete import path. -func (c *completer) populateImportCompletions(ctx context.Context, searchImport *ast.ImportSpec) error { +func (c *completer) populateImportCompletions(searchImport *ast.ImportSpec) error { if !strings.HasPrefix(searchImport.Path.Value, `"`) { return nil } diff --git a/gopls/internal/lsp/source/completion/deep_completion.go b/gopls/internal/lsp/source/completion/deep_completion.go index a72d5619105..66309530e73 100644 --- a/gopls/internal/lsp/source/completion/deep_completion.go +++ b/gopls/internal/lsp/source/completion/deep_completion.go @@ -113,7 +113,7 @@ func (s *deepCompletionState) newPath(cand candidate, obj types.Object) []types. // deepSearch searches a candidate and its subordinate objects for completion // items if deep completion is enabled and adds the valid candidates to // completion items. -func (c *completer) deepSearch(ctx context.Context) { +func (c *completer) deepSearch(ctx context.Context, start time.Time, deadline *time.Time) { defer func() { // We can return early before completing the search, so be sure to // clear out our queues to not impact any further invocations. 
@@ -121,7 +121,9 @@ func (c *completer) deepSearch(ctx context.Context) { c.deepState.nextQueue = c.deepState.nextQueue[:0] }() - for len(c.deepState.nextQueue) > 0 { + first := true // always fully process the first set of candidates + for len(c.deepState.nextQueue) > 0 && (first || deadline == nil || time.Now().Before(*deadline)) { + first = false c.deepState.thisQueue, c.deepState.nextQueue = c.deepState.nextQueue, c.deepState.thisQueue[:0] outer: @@ -170,7 +172,7 @@ func (c *completer) deepSearch(ctx context.Context) { c.deepState.candidateCount++ if c.opts.budget > 0 && c.deepState.candidateCount%100 == 0 { - spent := float64(time.Since(c.startTime)) / float64(c.opts.budget) + spent := float64(time.Since(start)) / float64(c.opts.budget) select { case <-ctx.Done(): return diff --git a/gopls/internal/lsp/source/typerefs/refs_test.go b/gopls/internal/lsp/source/typerefs/refs_test.go index eb1b1e1bb2f..adf79bd8f7e 100644 --- a/gopls/internal/lsp/source/typerefs/refs_test.go +++ b/gopls/internal/lsp/source/typerefs/refs_test.go @@ -485,7 +485,7 @@ type P struct{} func (a) x(P) `}, - want: map[string][]string{}, + want: map[string][]string{}, allowErrs: true, }, { diff --git a/gopls/internal/regtest/completion/completion_test.go b/gopls/internal/regtest/completion/completion_test.go index 7f865942206..0a898c48d16 100644 --- a/gopls/internal/regtest/completion/completion_test.go +++ b/gopls/internal/regtest/completion/completion_test.go @@ -8,14 +8,15 @@ import ( "fmt" "strings" "testing" + "time" "github.com/google/go-cmp/cmp" "golang.org/x/tools/gopls/internal/bug" "golang.org/x/tools/gopls/internal/hooks" + "golang.org/x/tools/gopls/internal/lsp/fake" + "golang.org/x/tools/gopls/internal/lsp/protocol" . "golang.org/x/tools/gopls/internal/lsp/regtest" "golang.org/x/tools/internal/testenv" - - "golang.org/x/tools/gopls/internal/lsp/protocol" ) func TestMain(m *testing.M) { @@ -622,6 +623,66 @@ func main() { }) } +func TestCompleteAllFields(t *testing.T) { + // This test verifies that completion results always include all struct fields. + // See golang/go#53992. + + const src = ` +-- go.mod -- +module mod.com + +go 1.18 + +-- p/p.go -- +package p + +import ( + "fmt" + + . "net/http" + . "runtime" + . "go/types" + . "go/parser" + . "go/ast" +) + +type S struct { + a, b, c, d, e, f, g, h, i, j, k, l, m int + n, o, p, q, r, s, t, u, v, w, x, y, z int +} + +func _() { + var s S + fmt.Println(s.) +} +` + + WithOptions(Settings{ + "completionBudget": "1ns", // must be non-zero as 0 => infinity + }).Run(t, src, func(t *testing.T, env *Env) { + wantFields := make(map[string]bool) + for c := 'a'; c <= 'z'; c++ { + wantFields[string(c)] = true + } + + env.OpenFile("p/p.go") + // Make an arbitrary edit to ensure we're not hitting the cache. + env.EditBuffer("p/p.go", fake.NewEdit(0, 0, 0, 0, fmt.Sprintf("// current time: %v\n", time.Now()))) + loc := env.RegexpSearch("p/p.go", `s\.()`) + completions := env.Completion(loc) + gotFields := make(map[string]bool) + for _, item := range completions.Items { + if item.Kind == protocol.FieldCompletion { + gotFields[item.Label] = true + } + } + + if diff := cmp.Diff(wantFields, gotFields); diff != "" { + t.Errorf("Completion(...) 
returned mismatching fields (-want +got):\n%s", diff) + } + }) +} + func TestDefinition(t *testing.T) { testenv.NeedsGo1Point(t, 17) // in go1.16, The FieldList in func x is not empty files := ` From 7261b3269227f2c7636f7d4316ed3dd5122d17ff Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 26 May 2023 14:56:46 -0400 Subject: [PATCH 109/109] gopls/internal/regtest: fix goimports on windows when using vendoring Add a test for goimports when using mod vendoring on windows, along with a very subtle one-line fix. Fixes golang/go#56291 Change-Id: I2e45f70fc6dfa32164d4664acad886ec811474b8 Reviewed-on: https://go-review.googlesource.com/c/tools/+/498695 Run-TryBot: Robert Findley Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro --- gopls/internal/regtest/misc/vendor_test.go | 38 ++++++++++++++++++++++ internal/imports/mod.go | 8 +++-- 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/gopls/internal/regtest/misc/vendor_test.go b/gopls/internal/regtest/misc/vendor_test.go index 4fcf1067a1e..efed16b4be3 100644 --- a/gopls/internal/regtest/misc/vendor_test.go +++ b/gopls/internal/regtest/misc/vendor_test.go @@ -63,3 +63,41 @@ func _() { ) }) } + +func TestWindowsVendoring_Issue56291(t *testing.T) { + const src = ` +-- go.mod -- +module mod.com + +go 1.14 + +require golang.org/x/hello v1.2.3 +-- go.sum -- +golang.org/x/hello v1.2.3 h1:EcMp5gSkIhaTkPXp8/3+VH+IFqTpk3ZbpOhqk0Ncmho= +golang.org/x/hello v1.2.3/go.mod h1:WW7ER2MRNXWA6c8/4bDIek4Hc/+DofTrMaQQitGXcco= +-- main.go -- +package main + +import "golang.org/x/hello/hi" + +func main() { + _ = hi.Goodbye +} +` + WithOptions( + Modes(Default), + ProxyFiles(basicProxy), + ).Run(t, src, func(t *testing.T, env *Env) { + env.OpenFile("main.go") + env.AfterChange(NoDiagnostics()) + env.RunGoCommand("mod", "tidy") + env.RunGoCommand("mod", "vendor") + env.AfterChange(NoDiagnostics()) + env.RegexpReplace("main.go", `import "golang.org/x/hello/hi"`, "") + env.AfterChange( + Diagnostics(env.AtRegexp("main.go", "hi.Goodbye")), + ) + env.SaveBuffer("main.go") + env.AfterChange(NoDiagnostics()) + }) +} diff --git a/internal/imports/mod.go b/internal/imports/mod.go index 1389d38b213..977d2389da1 100644 --- a/internal/imports/mod.go +++ b/internal/imports/mod.go @@ -38,7 +38,7 @@ type ModuleResolver struct { mains []*gocommand.ModuleJSON mainByDir map[string]*gocommand.ModuleJSON modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... - modsByDir []*gocommand.ModuleJSON // ...or Dir. + modsByDir []*gocommand.ModuleJSON // ...or number of path components in their Dir. // moduleCacheCache stores information about the module cache. moduleCacheCache *dirInfoCache @@ -124,7 +124,7 @@ func (r *ModuleResolver) init() error { }) sort.Slice(r.modsByDir, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.modsByDir[x].Dir, "/") + return strings.Count(r.modsByDir[x].Dir, string(filepath.Separator)) } return count(j) < count(i) // descending order }) @@ -328,6 +328,10 @@ func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { // - in /vendor/ in -mod=vendor mode. // - nested module? Dunno. // Rumor has it that replace targets cannot contain other replace targets. + // + // Note that it is critical here that modsByDir is sorted to have deeper dirs + // first. This ensures that findModuleByDir finds the innermost module. + // See also golang/go#56291. for _, m := range r.modsByDir { if !strings.HasPrefix(dir, m.Dir) { continue
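For background on the one-line mod.go change above, here is a minimal, self-contained sketch (not part of the patch; the directory names are invented) of why counting "/" defeats the depth-descending sort of modsByDir on Windows, where module Dir values use backslashes, and why counting the platform separator lets findModuleByDir see the innermost module first, as the new comment and golang/go#56291 describe:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Invented module roots as they would appear on Windows, outer module first.
	dirs := []string{
		`C:\work\mod`,
		`C:\work\mod\vendor\golang.org\x\hello`,
	}

	// Sort in descending order of path depth, mirroring the comparison in
	// ModuleResolver.init.
	byDepth := func(sep string) []string {
		out := append([]string(nil), dirs...)
		sort.SliceStable(out, func(i, j int) bool {
			return strings.Count(out[j], sep) < strings.Count(out[i], sep) // deeper dirs first
		})
		return out
	}

	// Counting "/" finds nothing in a Windows path, so the "sort" is a no-op:
	// the outer module stays first and a prefix scan like findModuleByDir
	// stops at it instead of the innermost module.
	fmt.Println(byDepth("/"))

	// Counting the real separator (filepath.Separator is '\\' on Windows)
	// puts the deeper, innermost module first, which is what the fix restores.
	fmt.Println(byDepth(`\`))
}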